From 4480aa04491c620f2dd4ae29c3acf7ceb2d8f54b Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Thu, 3 Jul 2025 08:01:57 +0530 Subject: [PATCH 1/8] multilane queue --- aa-core/src/userop/builder.rs | 3 +- twmq/src/lib.rs | 4 +- twmq/src/multilane.rs | 1348 +++++++++++++++++++++++++++++ twmq/src/shutdown.rs | 42 +- twmq/tests/multilane_batch_pop.rs | 492 +++++++++++ 5 files changed, 1873 insertions(+), 16 deletions(-) create mode 100644 twmq/src/multilane.rs create mode 100644 twmq/tests/multilane_batch_pop.rs diff --git a/aa-core/src/userop/builder.rs b/aa-core/src/userop/builder.rs index 355f883..65807a9 100644 --- a/aa-core/src/userop/builder.rs +++ b/aa-core/src/userop/builder.rs @@ -3,7 +3,6 @@ use std::sync::Arc; use alloy::{ hex, primitives::{Address, Bytes, U256}, - providers::Provider, rpc::types::{PackedUserOperation, UserOperation}, }; use engine_aa_types::VersionedUserOp; @@ -236,7 +235,7 @@ impl<'a, C: Chain> UserOpBuilderV0_7<'a, C> { // .estimate_eip1559_fees() // .await // .map_err(|err| err.to_engine_error(self.chain))?; - + // TODO: modularize this so only used with thirdweb paymaster let prices = self .chain diff --git a/twmq/src/lib.rs b/twmq/src/lib.rs index e516e1a..8e7b7d9 100644 --- a/twmq/src/lib.rs +++ b/twmq/src/lib.rs @@ -1,6 +1,7 @@ pub mod error; pub mod hooks; pub mod job; +pub mod multilane; pub mod queue; pub mod shutdown; @@ -14,6 +15,7 @@ use job::{ PushableJob, RequeuePosition, }; pub use job::BorrowedJob; +pub use multilane::{MultilaneQueue, MultilanePushableJob}; use queue::QueueOptions; use redis::Pipeline; use redis::{AsyncCommands, RedisResult, aio::ConnectionManager}; @@ -447,7 +449,7 @@ impl Queue { } } - pub fn work(self: &Arc) -> WorkerHandle { + pub fn work(self: &Arc) -> WorkerHandle> { let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>(); // Local semaphore to limit concurrency per instance let semaphore = Arc::new(Semaphore::new(self.options.local_concurrency)); diff --git a/twmq/src/multilane.rs b/twmq/src/multilane.rs new file mode 100644 index 0000000..497cb37 --- /dev/null +++ b/twmq/src/multilane.rs @@ -0,0 +1,1348 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use redis::{AsyncCommands, Pipeline, RedisResult, aio::ConnectionManager}; +use tokio::sync::Semaphore; +use tokio::time::sleep; +use tracing::Instrument; + +use crate::{ + CancelResult, DurableExecution, FailHookData, NackHookData, QueueInternalErrorHookData, + SuccessHookData, UserCancellable, + error::TwmqError, + hooks::TransactionContext, + job::{ + BorrowedJob, DelayOptions, Job, JobError, JobErrorRecord, JobErrorType, JobOptions, + JobResult, JobStatus, RequeuePosition, + }, + queue::QueueOptions, + shutdown::WorkerHandle, +}; + +/// A multilane queue that provides fair load balancing across multiple lanes +/// while maintaining the same reliability guarantees as the single-lane queue. 
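+///
+/// A minimal usage sketch (illustrative only; `MyHandler` and `my_job` are
+/// assumed stand-ins for a `DurableExecution` implementation and its job data):
+///
+/// ```ignore
+/// // Create the queue, wrap it in an Arc, and start a worker.
+/// let queue = MultilaneQueue::new("redis://127.0.0.1:6379/", "webhooks", None, MyHandler)
+///     .await?
+///     .arc();
+/// let worker = queue.work();
+///
+/// // Jobs are pushed to a named lane; lanes are scanned round-robin,
+/// // so one busy lane cannot starve the others.
+/// queue
+///     .clone()
+///     .job_for_lane("tenant_a", my_job)
+///     .id("job-123".to_string())
+///     .push()
+///     .await?;
+///
+/// // Graceful shutdown waits for in-flight jobs to complete.
+/// worker.shutdown().await?;
+/// ```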
+pub struct MultilaneQueue +where + H: DurableExecution, +{ + pub redis: ConnectionManager, + handler: Arc, + options: QueueOptions, + /// Unique identifier for this multilane queue instance + queue_id: String, +} + +/// Represents a job that can be pushed to a specific lane +pub struct MultilanePushableJob +where + H: DurableExecution, +{ + options: JobOptions, + queue: Arc>, + lane_id: String, +} + +impl MultilaneQueue { + pub async fn new( + redis_url: &str, + queue_id: &str, + options: Option, + handler: H, + ) -> Result { + let client = redis::Client::open(redis_url)?; + let redis = client.get_connection_manager().await?; + + let queue = Self { + redis, + queue_id: queue_id.to_string(), + options: options.unwrap_or_default(), + handler: Arc::new(handler), + }; + + Ok(queue) + } + + pub fn arc(self) -> Arc { + Arc::new(self) + } + + /// Create a job for a specific lane + pub fn job_for_lane( + self: Arc, + lane_id: &str, + data: H::JobData, + ) -> MultilanePushableJob { + MultilanePushableJob { + options: JobOptions::new(data), + queue: self, + lane_id: lane_id.to_string(), + } + } + + pub fn queue_id(&self) -> &str { + &self.queue_id + } + + // Redis key naming methods with proper multilane namespacing + pub fn lanes_zset_name(&self) -> String { + format!("twmq_multilane:{}:lanes", self.queue_id) + } + + pub fn lane_pending_list_name(&self, lane_id: &str) -> String { + format!("twmq_multilane:{}:lane:{}:pending", self.queue_id, lane_id) + } + + pub fn lane_delayed_zset_name(&self, lane_id: &str) -> String { + format!("twmq_multilane:{}:lane:{}:delayed", self.queue_id, lane_id) + } + + pub fn lane_active_hash_name(&self, lane_id: &str) -> String { + format!("twmq_multilane:{}:lane:{}:active", self.queue_id, lane_id) + } + + pub fn success_list_name(&self) -> String { + format!("twmq_multilane:{}:success", self.queue_id) + } + + pub fn failed_list_name(&self) -> String { + format!("twmq_multilane:{}:failed", self.queue_id) + } + + pub fn job_data_hash_name(&self) -> String { + format!("twmq_multilane:{}:jobs:data", self.queue_id) + } + + pub fn job_meta_hash_name(&self, job_id: &str) -> String { + format!("twmq_multilane:{}:job:{}:meta", self.queue_id, job_id) + } + + pub fn job_errors_list_name(&self, job_id: &str) -> String { + format!("twmq_multilane:{}:job:{}:errors", self.queue_id, job_id) + } + + pub fn job_result_hash_name(&self) -> String { + format!("twmq_multilane:{}:jobs:result", self.queue_id) + } + + pub fn dedupe_set_name(&self) -> String { + format!("twmq_multilane:{}:dedup", self.queue_id) + } + + pub fn pending_cancellation_set_name(&self) -> String { + format!("twmq_multilane:{}:pending_cancellations", self.queue_id) + } + + pub fn lease_key_name(&self, job_id: &str, lease_token: &str) -> String { + format!( + "twmq_multilane:{}:job:{}:lease:{}", + self.queue_id, job_id, lease_token + ) + } + + /// Push a job to a specific lane + pub async fn push_to_lane( + &self, + lane_id: &str, + job_options: JobOptions, + ) -> Result, TwmqError> { + let script = redis::Script::new( + r#" + local queue_id = ARGV[1] + local lane_id = ARGV[2] + local job_id = ARGV[3] + local job_data = ARGV[4] + local now = ARGV[5] + local delay = ARGV[6] + local reentry_position = ARGV[7] -- "first" or "last" + + local lanes_zset_name = KEYS[1] + local lane_delayed_zset_name = KEYS[2] + local lane_pending_list_name = KEYS[3] + local job_data_hash_name = KEYS[4] + local job_meta_hash_name = KEYS[5] + local dedupe_set_name = KEYS[6] + + -- Check if job already exists in any queue + if 
redis.call('SISMEMBER', dedupe_set_name, job_id) == 1 then + -- Job with this ID already exists, skip + return { 0, job_id } + end + + -- Store job data + redis.call('HSET', job_data_hash_name, job_id, job_data) + + -- Store job metadata as a hash + redis.call('HSET', job_meta_hash_name, 'created_at', now) + redis.call('HSET', job_meta_hash_name, 'attempts', 0) + redis.call('HSET', job_meta_hash_name, 'lane_id', lane_id) + + -- Add to deduplication set + redis.call('SADD', dedupe_set_name, job_id) + + -- Add lane to lanes zset if not exists (score 0 means never processed) + redis.call('ZADD', lanes_zset_name, 'NX', 0, lane_id) + + -- Add to appropriate queue based on delay + if tonumber(delay) > 0 then + local process_at = now + tonumber(delay) + -- Store position information for this delayed job + redis.call('HSET', job_meta_hash_name, 'reentry_position', reentry_position) + redis.call('ZADD', lane_delayed_zset_name, process_at, job_id) + else + -- Non-delayed job always goes to end of pending + redis.call('RPUSH', lane_pending_list_name, job_id) + end + + return { 1, job_id } + "#, + ); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let job = Job { + id: job_options.id.clone(), + data: job_options.data, + attempts: 0, + created_at: now, + processed_at: None, + finished_at: None, + }; + + let job_data = serde_json::to_string(&job.data)?; + + let delay = job_options.delay.unwrap_or(DelayOptions { + delay: Duration::ZERO, + position: RequeuePosition::Last, + }); + + let delay_secs = delay.delay.as_secs(); + let position_string = delay.position.to_string(); + + let _result: (i32, String) = script + .key(self.lanes_zset_name()) + .key(self.lane_delayed_zset_name(lane_id)) + .key(self.lane_pending_list_name(lane_id)) + .key(self.job_data_hash_name()) + .key(self.job_meta_hash_name(&job.id)) + .key(self.dedupe_set_name()) + .arg(&self.queue_id) + .arg(lane_id) + .arg(&job_options.id) + .arg(job_data) + .arg(now) + .arg(delay_secs) + .arg(position_string) + .invoke_async(&mut self.redis.clone()) + .await?; + + Ok(job) + } + + /// Get job by ID (works across all lanes) + pub async fn get_job(&self, job_id: &str) -> Result>, TwmqError> { + let mut conn = self.redis.clone(); + let job_data_t_json: Option = conn.hget(self.job_data_hash_name(), job_id).await?; + + if let Some(data_json) = job_data_t_json { + let data_t: H::JobData = serde_json::from_str(&data_json)?; + + // Fetch metadata + let meta_map: HashMap = + conn.hgetall(self.job_meta_hash_name(job_id)).await?; + + let attempts: u32 = meta_map + .get("attempts") + .and_then(|s| s.parse().ok()) + .unwrap_or(0); + let created_at: u64 = meta_map + .get("created_at") + .and_then(|s| s.parse().ok()) + .unwrap_or(0); + let processed_at: Option = + meta_map.get("processed_at").and_then(|s| s.parse().ok()); + let finished_at: Option = meta_map.get("finished_at").and_then(|s| s.parse().ok()); + + Ok(Some(Job { + id: job_id.to_string(), + data: data_t, + attempts, + created_at, + processed_at, + finished_at, + })) + } else { + Ok(None) + } + } + + /// Count jobs by status across all lanes or for a specific lane + pub async fn count( + &self, + status: JobStatus, + lane_id: Option<&str>, + ) -> Result { + let mut conn = self.redis.clone(); + + let count = match status { + JobStatus::Pending => { + if let Some(lane) = lane_id { + let count: usize = conn.llen(self.lane_pending_list_name(lane)).await?; + count + } else { + // Sum across all active lanes + let lanes: Vec = conn.zrange(self.lanes_zset_name(), 0, 
-1).await?; + let mut total = 0; + for lane in lanes { + let count: usize = conn.llen(self.lane_pending_list_name(&lane)).await?; + total += count; + } + total + } + } + JobStatus::Active => { + if let Some(lane) = lane_id { + let count: usize = conn.hlen(self.lane_active_hash_name(lane)).await?; + count + } else { + // Sum across all active lanes + let lanes: Vec = conn.zrange(self.lanes_zset_name(), 0, -1).await?; + let mut total = 0; + for lane in lanes { + let count: usize = conn.hlen(self.lane_active_hash_name(&lane)).await?; + total += count; + } + total + } + } + JobStatus::Delayed => { + if let Some(lane) = lane_id { + let count: usize = conn.zcard(self.lane_delayed_zset_name(lane)).await?; + count + } else { + // Sum across all active lanes + let lanes: Vec = conn.zrange(self.lanes_zset_name(), 0, -1).await?; + let mut total = 0; + for lane in lanes { + let count: usize = conn.zcard(self.lane_delayed_zset_name(&lane)).await?; + total += count; + } + total + } + } + JobStatus::Success => { + let count: usize = conn.llen(self.success_list_name()).await?; + count + } + JobStatus::Failed => { + let count: usize = conn.llen(self.failed_list_name()).await?; + count + } + }; + + Ok(count) + } + + pub async fn lanes_count(&self) -> Result { + let mut conn = self.redis.clone(); + let count: usize = conn.zcard(self.lanes_zset_name()).await?; + Ok(count) + } + + /// Cancel a job by ID (works across all lanes) + pub async fn cancel_job(&self, job_id: &str) -> Result { + let script = redis::Script::new( + r#" + local queue_id = ARGV[1] + local job_id = ARGV[2] + local now = ARGV[3] + + local lanes_zset = KEYS[1] + local failed_list = KEYS[2] + local pending_cancellation_set = KEYS[3] + local job_meta_hash = KEYS[4] + local job_data_hash = KEYS[5] + + -- Get the lane for this job + local lane_id = redis.call('HGET', job_meta_hash, 'lane_id') + if not lane_id then + return "not_found" + end + + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + local lane_delayed_zset = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':delayed' + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. 
':active' + + -- Try to remove from pending queue + if redis.call('LREM', lane_pending_list, 0, job_id) > 0 then + -- Move to failed state with cancellation + redis.call('LPUSH', failed_list, job_id) + redis.call('HSET', job_meta_hash, 'finished_at', now) + return "cancelled_immediately" + end + + -- Try to remove from delayed queue + if redis.call('ZREM', lane_delayed_zset, job_id) > 0 then + -- Move to failed state with cancellation + redis.call('LPUSH', failed_list, job_id) + redis.call('HSET', job_meta_hash, 'finished_at', now) + return "cancelled_immediately" + end + + -- Check if job is active + if redis.call('HEXISTS', lane_active_hash, job_id) == 1 then + -- Add to pending cancellations set + redis.call('SADD', pending_cancellation_set, job_id) + return "cancellation_pending" + end + + return "not_found" + "#, + ); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let result: String = script + .key(self.lanes_zset_name()) + .key(self.failed_list_name()) + .key(self.pending_cancellation_set_name()) + .key(self.job_meta_hash_name(job_id)) + .key(self.job_data_hash_name()) + .arg(&self.queue_id) + .arg(job_id) + .arg(now) + .invoke_async(&mut self.redis.clone()) + .await?; + + match result.as_str() { + "cancelled_immediately" => { + if let Err(e) = self.process_cancelled_job(job_id).await { + tracing::error!( + job_id = %job_id, + error = ?e, + "Failed to process immediately cancelled job" + ); + } + Ok(CancelResult::CancelledImmediately) + } + "cancellation_pending" => Ok(CancelResult::CancellationPending), + "not_found" => Ok(CancelResult::NotFound), + _ => Err(TwmqError::Runtime { + message: format!("Unexpected cancel result: {}", result), + }), + } + } + + /// Start the multilane worker + pub fn work(self: &Arc) -> WorkerHandle> { + let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>(); + let semaphore = Arc::new(Semaphore::new(self.options.local_concurrency)); + let handler = self.handler.clone(); + let outer_queue_clone = self.clone(); + + let join_handle = tokio::spawn(async move { + let mut interval = tokio::time::interval(outer_queue_clone.options.polling_interval); + let handler_clone = handler.clone(); + let always_poll = outer_queue_clone.options.always_poll; + + tracing::info!( + "Multilane worker started for queue: {}", + outer_queue_clone.queue_id() + ); + + loop { + tokio::select! 
{ + _ = &mut shutdown_rx => { + tracing::info!("Shutdown signal received for multilane queue: {}", outer_queue_clone.queue_id()); + break; + } + + _ = interval.tick() => { + let queue_clone = outer_queue_clone.clone(); + let available_permits = semaphore.available_permits(); + + if available_permits == 0 && !always_poll { + tracing::trace!("No permits available, waiting..."); + continue; + } + + tracing::trace!("Available permits: {}", available_permits); + + match queue_clone.pop_batch_jobs(available_permits).await { + Ok(jobs) => { + tracing::trace!("Got {} jobs across lanes", jobs.len()); + + for (lane_id, job) in jobs { + let permit = semaphore.clone().acquire_owned().await.unwrap(); + let queue_clone = queue_clone.clone(); + let job_id = job.id().to_string(); + let handler_clone = handler_clone.clone(); + + tokio::spawn(async move { + let result = handler_clone.process(&job).await; + + if let Err(e) = queue_clone.complete_job(&job, result).await { + tracing::error!( + "Failed to complete job {} handling: {:?}", + job.id(), + e + ); + } + + drop(permit); + }.instrument(tracing::info_span!("twmq_multilane_worker", job_id, lane_id))); + } + } + Err(e) => { + tracing::error!("Failed to pop batch jobs: {:?}", e); + sleep(Duration::from_millis(1000)).await; + } + }; + } + } + } + + // Graceful shutdown + tracing::info!( + "Waiting for {} active jobs to complete for multilane queue: {}", + outer_queue_clone.options.local_concurrency - semaphore.available_permits(), + outer_queue_clone.queue_id() + ); + + let _permits: Vec<_> = (0..outer_queue_clone.options.local_concurrency) + .map(|_| semaphore.clone().acquire_owned()) + .collect::>() + .await + .into_iter() + .collect::, _>>() + .map_err(|e| TwmqError::Runtime { + message: format!("Failed to acquire permits during shutdown: {}", e), + })?; + + tracing::info!( + "All jobs completed, multilane worker shutdown complete for queue: {}", + outer_queue_clone.queue_id() + ); + Ok(()) + }); + + WorkerHandle { + join_handle, + shutdown_tx, + queue: self.clone(), + } + } + + /// Pop jobs from multiple lanes in a fair round-robin manner with full atomicity + pub async fn pop_batch_jobs( + self: &Arc, + batch_size: usize, + ) -> RedisResult)>> { + let script = redis::Script::new( + r#" + local queue_id = ARGV[1] + local now = tonumber(ARGV[2]) + local batch_size = tonumber(ARGV[3]) + local lease_seconds = tonumber(ARGV[4]) + + local lanes_zset_name = KEYS[1] + local job_data_hash_name = KEYS[2] + local pending_cancellation_set = KEYS[3] + local failed_list_name = KEYS[4] + local success_list_name = KEYS[5] + + local result_jobs = {} + local timed_out_jobs = {} + local cancelled_jobs = {} + + -- Helper function to cleanup expired leases for a specific lane + local function cleanup_lane_leases(lane_id) + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':active' + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + + local active_jobs = redis.call('HGETALL', lane_active_hash) + + for i = 1, #active_jobs, 2 do + local job_id = active_jobs[i] + local attempts = active_jobs[i + 1] + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':meta' + + local current_lease_token = redis.call('HGET', job_meta_hash, 'lease_token') + + if current_lease_token then + local lease_key = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':lease:' .. 
current_lease_token + local lease_exists = redis.call('EXISTS', lease_key) + + if lease_exists == 0 then + redis.call('HINCRBY', job_meta_hash, 'attempts', 1) + redis.call('HDEL', job_meta_hash, 'lease_token') + redis.call('HDEL', lane_active_hash, job_id) + redis.call('LPUSH', lane_pending_list, job_id) + table.insert(timed_out_jobs, {lane_id, job_id}) + end + else + redis.call('HINCRBY', job_meta_hash, 'attempts', 1) + redis.call('HDEL', lane_active_hash, job_id) + redis.call('LPUSH', lane_pending_list, job_id) + table.insert(timed_out_jobs, {lane_id, job_id}) + end + end + end + + -- Helper function to move delayed jobs to pending for a specific lane + local function process_delayed_jobs(lane_id) + local lane_delayed_zset = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':delayed' + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + + local delayed_jobs = redis.call('ZRANGEBYSCORE', lane_delayed_zset, 0, now) + for i, job_id in ipairs(delayed_jobs) do + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':meta' + local reentry_position = redis.call('HGET', job_meta_hash, 'reentry_position') or 'last' + + redis.call('ZREM', lane_delayed_zset, job_id) + redis.call('HDEL', job_meta_hash, 'reentry_position') + + if reentry_position == 'first' then + redis.call('LPUSH', lane_pending_list, job_id) + else + redis.call('RPUSH', lane_pending_list, job_id) + end + end + end + + -- Helper function to pop one job from a lane + local function pop_job_from_lane(lane_id) + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':active' + + local job_id = redis.call('RPOP', lane_pending_list) + if not job_id then + return nil + end + + local job_data = redis.call('HGET', job_data_hash_name, job_id) + if not job_data then + return nil + end + + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':meta' + redis.call('HSET', job_meta_hash, 'processed_at', now) + local created_at = redis.call('HGET', job_meta_hash, 'created_at') or now + local attempts = redis.call('HINCRBY', job_meta_hash, 'attempts', 1) + + local lease_token = now .. '_' .. job_id .. '_' .. attempts + local lease_key = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':lease:' .. lease_token + + redis.call('SET', lease_key, '1') + redis.call('EXPIRE', lease_key, lease_seconds) + redis.call('HSET', job_meta_hash, 'lease_token', lease_token) + redis.call('HSET', lane_active_hash, job_id, attempts) + + return {job_id, job_data, tostring(attempts), tostring(created_at), tostring(now), lease_token} + end + + -- Step 1: Process pending cancellations first + local cancel_requests = redis.call('SMEMBERS', pending_cancellation_set) + + for i, job_id in ipairs(cancel_requests) do + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':meta' + local lane_id = redis.call('HGET', job_meta_hash, 'lane_id') + + if lane_id then + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. 
':active' + + if redis.call('HEXISTS', lane_active_hash, job_id) == 1 then + -- Still processing, keep in cancellation set + else + -- Job finished processing, check outcome + local success_count = redis.call('LREM', success_list_name, 0, job_id) + if success_count > 0 then + -- Job succeeded, add it back to success list + redis.call('LPUSH', success_list_name, job_id) + else + redis.call('LPUSH', failed_list_name, job_id) + redis.call('HSET', job_meta_hash, 'finished_at', now) + table.insert(cancelled_jobs, {lane_id, job_id}) + end + redis.call('SREM', pending_cancellation_set, job_id) + end + end + end + + -- Step 2: Efficient lane processing + local jobs_popped = 0 + local lanes_with_scores = redis.call('ZRANGE', lanes_zset_name, 0, -1, 'WITHSCORES') + local total_lanes = #lanes_with_scores / 2 + + if total_lanes == 0 then + return {result_jobs, cancelled_jobs, timed_out_jobs} + end + + local lane_index = 1 + local empty_lanes_count = 0 + + while jobs_popped < batch_size and empty_lanes_count < total_lanes do + local lane_id = lanes_with_scores[lane_index * 2 - 1] + + -- Skip if we've already marked this lane as empty + if lane_id == nil then + lane_index = lane_index + 1 + if lane_index > total_lanes then + lane_index = 1 + end + else + local last_score = tonumber(lanes_with_scores[lane_index * 2]) + + -- Only cleanup if not visited this batch (score != now) + if last_score ~= now then + cleanup_lane_leases(lane_id) + process_delayed_jobs(lane_id) + redis.call('ZADD', lanes_zset_name, now, lane_id) + lanes_with_scores[lane_index * 2] = tostring(now) + end + + -- Try to pop a job from this lane + local job_result = pop_job_from_lane(lane_id) + + if job_result then + table.insert(result_jobs, {lane_id, job_result[1], job_result[2], job_result[3], job_result[4], job_result[5], job_result[6]}) + jobs_popped = jobs_popped + 1 + else + -- Lane is empty, mark it and count it + lanes_with_scores[lane_index * 2 - 1] = nil + lanes_with_scores[lane_index * 2] = nil + empty_lanes_count = empty_lanes_count + 1 + + -- Check if lane should be removed from Redis + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + local lane_delayed_zset = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':delayed' + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. 
':active' + + local pending_count = redis.call('LLEN', lane_pending_list) + local delayed_count = redis.call('ZCARD', lane_delayed_zset) + local active_count = redis.call('HLEN', lane_active_hash) + + if pending_count == 0 and delayed_count == 0 and active_count == 0 then + redis.call('ZREM', lanes_zset_name, lane_id) + end + end + + -- Move to next lane + lane_index = lane_index + 1 + if lane_index > total_lanes then + lane_index = 1 + end + end + end + + return {result_jobs, cancelled_jobs, timed_out_jobs} + "#, + ); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let results_from_lua: ( + Vec<(String, String, String, String, String, String, String)>, + Vec<(String, String)>, + Vec<(String, String)>, + ) = script + .key(self.lanes_zset_name()) + .key(self.job_data_hash_name()) + .key(self.pending_cancellation_set_name()) + .key(self.failed_list_name()) + .key(self.success_list_name()) + .arg(&self.queue_id) + .arg(now) + .arg(batch_size) + .arg(self.options.lease_duration.as_secs()) + .invoke_async(&mut self.redis.clone()) + .await?; + + let (job_results, cancelled_jobs, timed_out_jobs) = results_from_lua; + + // Log lease timeouts and cancellations with lane context + for (lane_id, job_id) in &timed_out_jobs { + tracing::warn!(job_id = %job_id, lane_id = %lane_id, "Job lease expired, moved back to pending"); + } + for (lane_id, job_id) in &cancelled_jobs { + tracing::info!(job_id = %job_id, lane_id = %lane_id, "Job cancelled by user request"); + } + + let mut jobs = Vec::new(); + for ( + lane_id, + job_id_str, + job_data_t_json, + attempts_str, + created_at_str, + processed_at_str, + lease_token, + ) in job_results + { + match serde_json::from_str::(&job_data_t_json) { + Ok(data_t) => { + let attempts: u32 = attempts_str.parse().unwrap_or(1); + let created_at: u64 = created_at_str.parse().unwrap_or(now); + let processed_at: u64 = processed_at_str.parse().unwrap_or(now); + + let job = Job { + id: job_id_str, + data: data_t, + attempts, + created_at, + processed_at: Some(processed_at), + finished_at: None, + }; + + jobs.push((lane_id, BorrowedJob::new(job, lease_token))); + } + Err(e) => { + tracing::error!( + job_id = job_id_str, + lane_id = lane_id, + error = ?e, + "Failed to deserialize job data. 
Spawning task to move job to failed state.", + ); + + let queue_clone = self.clone(); + tokio::spawn(async move { + let mut pipeline = redis::pipe(); + pipeline.atomic(); + + let mut _tx_context = TransactionContext::new( + &mut pipeline, + queue_clone.queue_id().to_string(), + ); + + let job: Job> = Job { + id: job_id_str.to_string(), + data: None, + attempts: attempts_str.parse().unwrap_or(1), + created_at: created_at_str.parse().unwrap_or(now), + processed_at: processed_at_str.parse().ok(), + finished_at: Some(now), + }; + + let twmq_error: TwmqError = e.into(); + + if let Err(e) = queue_clone + .complete_job_queue_error(&job, &lease_token, &twmq_error.into()) + .await + { + tracing::error!( + job_id = job.id, + lane_id = lane_id, + error = ?e, + "Failed to complete job fail handling successfully", + ); + } + }); + } + } + } + + // Process cancelled jobs through hook system + for (lane_id, job_id) in cancelled_jobs { + let queue_clone = self.clone(); + tokio::spawn(async move { + if let Err(e) = queue_clone.process_cancelled_job(&job_id).await { + tracing::error!( + job_id = %job_id, + lane_id = %lane_id, + error = ?e, + "Failed to process cancelled job" + ); + } + }); + } + + Ok(jobs) + } + + /// Process a cancelled job through the hook system with user cancellation error + async fn process_cancelled_job(&self, job_id: &str) -> Result<(), TwmqError> { + match self.get_job(job_id).await? { + Some(job) => { + let cancellation_error = H::ErrorData::user_cancelled(); + + let mut pipeline = redis::pipe(); + pipeline.atomic(); + + let mut tx_context = + TransactionContext::new(&mut pipeline, self.queue_id().to_string()); + + let fail_hook_data = FailHookData { + error: &cancellation_error, + }; + + let borrowed_job = BorrowedJob::new(job, "cancelled".to_string()); + self.handler + .on_fail(&borrowed_job, fail_hook_data, &mut tx_context) + .await; + + pipeline.query_async::<()>(&mut self.redis.clone()).await?; + + tracing::info!( + job_id = %job_id, + "Successfully processed job cancellation hooks" + ); + + Ok(()) + } + None => { + tracing::warn!( + job_id = %job_id, + "Cancelled job not found when trying to process hooks" + ); + Ok(()) + } + } + } + + // Job completion methods (same as single-lane queue but with multilane naming) + fn add_success_operations( + &self, + job: &BorrowedJob, + result: &H::Output, + pipeline: &mut Pipeline, + ) -> Result<(), TwmqError> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + + pipeline.del(&lease_key); + + // Get lane_id from job metadata to remove from correct lane active hash + let job_meta_hash = self.job_meta_hash_name(&job.job.id); + + // We need to get lane_id first, then remove from that lane's active hash + // This requires a separate Redis call before the pipeline, but ensures atomicity within the pipeline + pipeline + .lpush(self.success_list_name(), &job.job.id) + .hset(&job_meta_hash, "finished_at", now) + .hdel(&job_meta_hash, "lease_token"); + + let result_json = serde_json::to_string(result)?; + pipeline.hset(self.job_result_hash_name(), &job.job.id, result_json); + + Ok(()) + } + + async fn post_success_completion(&self) -> Result<(), TwmqError> { + let trim_script = redis::Script::new( + r#" + local queue_id = KEYS[1] + local list_name = KEYS[2] + local job_data_hash = KEYS[3] + local results_hash = KEYS[4] + local dedupe_set_name = KEYS[5] + + local max_len = tonumber(ARGV[1]) + + local job_ids_to_delete = 
redis.call('LRANGE', list_name, max_len, -1) + + if #job_ids_to_delete > 0 then + for _, j_id in ipairs(job_ids_to_delete) do + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. j_id .. ':meta' + local errors_list_name = 'twmq_multilane:' .. queue_id .. ':job:' .. j_id .. ':errors' + + redis.call('SREM', dedupe_set_name, j_id) + redis.call('HDEL', job_data_hash, j_id) + redis.call('DEL', job_meta_hash) + redis.call('HDEL', results_hash, j_id) + redis.call('DEL', errors_list_name) + end + redis.call('LTRIM', list_name, 0, max_len - 1) + end + return #job_ids_to_delete + "#, + ); + + let trimmed_count: usize = trim_script + .key(self.queue_id()) + .key(self.success_list_name()) + .key(self.job_data_hash_name()) + .key(self.job_result_hash_name()) + .key(self.dedupe_set_name()) + .arg(self.options.max_success) + .invoke_async(&mut self.redis.clone()) + .await?; + + if trimmed_count > 0 { + tracing::info!("Pruned {} successful jobs", trimmed_count); + } + + Ok(()) + } + + fn add_nack_operations( + &self, + job: &BorrowedJob, + error: &H::ErrorData, + delay: Option, + position: RequeuePosition, + pipeline: &mut Pipeline, + ) -> Result<(), TwmqError> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + let job_meta_hash = self.job_meta_hash_name(&job.job.id); + + pipeline.del(&lease_key); + pipeline.hdel(&job_meta_hash, "lease_token"); + + let error_record = JobErrorRecord { + attempt: job.job.attempts, + error, + details: JobErrorType::nack(delay, position), + created_at: now, + }; + + let error_json = serde_json::to_string(&error_record)?; + pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); + + // Note: The actual requeuing logic needs to be handled by a separate operation + // since we need the lane_id from metadata. This will be done in the complete_job method. + + Ok(()) + } + + async fn post_nack_completion(&self) -> Result<(), TwmqError> { + Ok(()) + } + + fn add_fail_operations( + &self, + job: &BorrowedJob, + error: &H::ErrorData, + pipeline: &mut Pipeline, + ) -> Result<(), TwmqError> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + let job_meta_hash = self.job_meta_hash_name(&job.job.id); + + pipeline.del(&lease_key); + pipeline + .lpush(self.failed_list_name(), &job.job.id) + .hset(&job_meta_hash, "finished_at", now) + .hdel(&job_meta_hash, "lease_token"); + + let error_record = JobErrorRecord { + attempt: job.job.attempts, + error, + details: JobErrorType::fail(), + created_at: now, + }; + let error_json = serde_json::to_string(&error_record)?; + pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); + + Ok(()) + } + + async fn post_fail_completion(&self) -> Result<(), TwmqError> { + let trim_script = redis::Script::new( + r#" + local queue_id = KEYS[1] + local list_name = KEYS[2] + local job_data_hash = KEYS[3] + local dedupe_set_name = KEYS[4] + + local max_len = tonumber(ARGV[1]) + + local job_ids_to_delete = redis.call('LRANGE', list_name, max_len, -1) + + if #job_ids_to_delete > 0 then + for _, j_id in ipairs(job_ids_to_delete) do + local errors_list_name = 'twmq_multilane:' .. queue_id .. ':job:' .. j_id .. ':errors' + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. j_id .. 
':meta' + + redis.call('SREM', dedupe_set_name, j_id) + redis.call('HDEL', job_data_hash, j_id) + redis.call('DEL', job_meta_hash) + redis.call('DEL', errors_list_name) + end + redis.call('LTRIM', list_name, 0, max_len - 1) + end + return #job_ids_to_delete + "#, + ); + + let trimmed_count: usize = trim_script + .key(self.queue_id()) + .key(self.failed_list_name()) + .key(self.job_data_hash_name()) + .key(self.dedupe_set_name()) + .arg(self.options.max_failed) + .invoke_async(&mut self.redis.clone()) + .await?; + + if trimmed_count > 0 { + tracing::info!("Pruned {} failed jobs", trimmed_count); + } + + Ok(()) + } + + #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id(), queue = self.queue_id()))] + async fn complete_job( + &self, + job: &BorrowedJob, + result: JobResult, + ) -> Result<(), TwmqError> { + // First, we need to get the lane_id and remove from appropriate lane's active hash + let mut conn = self.redis.clone(); + let lane_id: Option = conn + .hget(self.job_meta_hash_name(&job.job.id), "lane_id") + .await?; + + let lane_id = lane_id.ok_or_else(|| TwmqError::Runtime { + message: format!("Job {} missing lane_id in metadata", job.job.id), + })?; + + // Build pipeline with hooks and operations + let mut hook_pipeline = redis::pipe(); + let mut tx_context = + TransactionContext::new(&mut hook_pipeline, self.queue_id().to_string()); + + match &result { + Ok(output) => { + let success_hook_data = SuccessHookData { result: output }; + self.handler + .on_success(job, success_hook_data, &mut tx_context) + .await; + self.add_success_operations(job, output, &mut hook_pipeline)?; + // Remove from lane's active hash + hook_pipeline.hdel(self.lane_active_hash_name(&lane_id), &job.job.id); + } + Err(JobError::Nack { + error, + delay, + position, + }) => { + let nack_hook_data = NackHookData { + error, + delay: *delay, + position: *position, + }; + self.handler + .on_nack(job, nack_hook_data, &mut tx_context) + .await; + self.add_nack_operations(job, error, *delay, *position, &mut hook_pipeline)?; + + // Remove from lane's active hash and requeue to appropriate lane queue + hook_pipeline.hdel(self.lane_active_hash_name(&lane_id), &job.job.id); + + if let Some(delay_duration) = delay { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + let delay_until = now + delay_duration.as_secs(); + let pos_str = position.to_string(); + + hook_pipeline + .hset( + self.job_meta_hash_name(&job.job.id), + "reentry_position", + pos_str, + ) + .zadd( + self.lane_delayed_zset_name(&lane_id), + &job.job.id, + delay_until, + ); + } else { + match position { + RequeuePosition::First => { + hook_pipeline.lpush(self.lane_pending_list_name(&lane_id), &job.job.id); + } + RequeuePosition::Last => { + hook_pipeline.rpush(self.lane_pending_list_name(&lane_id), &job.job.id); + } + } + } + } + Err(JobError::Fail(error)) => { + let fail_hook_data = FailHookData { error }; + self.handler + .on_fail(job, fail_hook_data, &mut tx_context) + .await; + self.add_fail_operations(job, error, &mut hook_pipeline)?; + // Remove from lane's active hash + hook_pipeline.hdel(self.lane_active_hash_name(&lane_id), &job.job.id); + } + } + + // Execute with lease protection (same pattern as single-lane queue) + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + + loop { + let mut conn = self.redis.clone(); + + redis::cmd("WATCH") + .arg(&lease_key) + .query_async::<()>(&mut conn) + .await?; + + let lease_exists: bool = conn.exists(&lease_key).await?; + if !lease_exists { 
+ redis::cmd("UNWATCH").query_async::<()>(&mut conn).await?; + tracing::warn!(job_id = %job.job.id, "Lease no longer exists, job was cancelled or timed out"); + return Ok(()); + } + + let mut atomic_pipeline = hook_pipeline.clone(); + atomic_pipeline.atomic(); + + match atomic_pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => { + match &result { + Ok(_) => self.post_success_completion().await?, + Err(JobError::Nack { .. }) => self.post_nack_completion().await?, + Err(JobError::Fail(_)) => self.post_fail_completion().await?, + } + + tracing::debug!(job_id = %job.job.id, lane_id = %lane_id, "Job completion successful"); + return Ok(()); + } + Err(_) => { + tracing::debug!(job_id = %job.job.id, "WATCH failed during completion, retrying"); + continue; + } + } + } + } + + #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id, queue = self.queue_id()))] + async fn complete_job_queue_error( + &self, + job: &Job>, + lease_token: &str, + error: &H::ErrorData, + ) -> Result<(), TwmqError> { + // Get lane_id for proper cleanup + let mut conn = self.redis.clone(); + let lane_id: Option = conn + .hget(self.job_meta_hash_name(&job.id), "lane_id") + .await?; + + let lane_id = lane_id.unwrap_or_else(|| "unknown".to_string()); + + let mut hook_pipeline = redis::pipe(); + let mut tx_context = + TransactionContext::new(&mut hook_pipeline, self.queue_id().to_string()); + + let twmq_error = TwmqError::Runtime { + message: "Job processing failed with user error".to_string(), + }; + let queue_error_hook_data = QueueInternalErrorHookData { error: &twmq_error }; + self.handler + .on_queue_error(job, queue_error_hook_data, &mut tx_context) + .await; + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.id, lease_token); + let job_meta_hash = self.job_meta_hash_name(&job.id); + + hook_pipeline.del(&lease_key); + hook_pipeline + .hdel(self.lane_active_hash_name(&lane_id), &job.id) + .lpush(self.failed_list_name(), &job.id) + .hset(&job_meta_hash, "finished_at", now) + .hdel(&job_meta_hash, "lease_token"); + + let error_record = JobErrorRecord { + attempt: job.attempts, + error, + details: JobErrorType::fail(), + created_at: now, + }; + let error_json = serde_json::to_string(&error_record)?; + hook_pipeline.lpush(self.job_errors_list_name(&job.id), error_json); + + // Execute with lease protection + loop { + let mut conn = self.redis.clone(); + + redis::cmd("WATCH") + .arg(&lease_key) + .query_async::<()>(&mut conn) + .await?; + + let lease_exists: bool = conn.exists(&lease_key).await?; + if !lease_exists { + redis::cmd("UNWATCH").query_async::<()>(&mut conn).await?; + tracing::warn!(job_id = %job.id, "Lease no longer exists, job was cancelled or timed out"); + return Ok(()); + } + + let mut atomic_pipeline = hook_pipeline.clone(); + atomic_pipeline.atomic(); + + match atomic_pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => { + self.post_fail_completion().await?; + tracing::debug!(job_id = %job.id, lane_id = %lane_id, "Queue error job completion successful"); + return Ok(()); + } + Err(_) => { + tracing::debug!(job_id = %job.id, "WATCH failed during queue error completion, retrying"); + continue; + } + } + } + } +} + +impl MultilanePushableJob { + pub fn delay(mut self, delay: Duration) -> Self { + self.options.delay = Some(DelayOptions { + delay, + position: RequeuePosition::Last, + }); + self + } + + pub fn delay_with_position(mut self, delay: Duration, position: RequeuePosition) -> Self { + 
self.options.delay = Some(DelayOptions { delay, position }); + self + } + + pub fn id(mut self, id: String) -> Self { + self.options.id = id; + self + } + + pub async fn push(self) -> Result, TwmqError> { + self.queue.push_to_lane(&self.lane_id, self.options).await + } +} diff --git a/twmq/src/shutdown.rs b/twmq/src/shutdown.rs index 7046552..04341c3 100644 --- a/twmq/src/shutdown.rs +++ b/twmq/src/shutdown.rs @@ -2,25 +2,41 @@ use crate::error::TwmqError; use std::sync::Arc; /// Handle for a single worker that can be shut down gracefully -pub struct WorkerHandle { +pub struct WorkerHandle { pub join_handle: tokio::task::JoinHandle>, pub shutdown_tx: tokio::sync::oneshot::Sender<()>, - pub queue: Arc>, + pub queue: Arc, } -impl WorkerHandle { +pub trait QueueIdentifier { + fn queue_name(&self) -> &str; +} + +impl QueueIdentifier for crate::Queue { + fn queue_name(&self) -> &str { + self.name() + } +} + +impl QueueIdentifier for crate::MultilaneQueue { + fn queue_name(&self) -> &str { + self.queue_id() + } +} + +impl WorkerHandle { /// Shutdown this worker gracefully pub async fn shutdown(self) -> Result<(), TwmqError> { tracing::info!( "Initiating graceful shutdown of worker for queue: {}", - self.queue.name() + self.queue.queue_name() ); // Signal shutdown to the worker if self.shutdown_tx.send(()).is_err() { tracing::warn!( "Worker for queue {} was already shutting down", - self.queue.name() + self.queue.queue_name() ); } @@ -29,14 +45,14 @@ impl WorkerHandle { Ok(Ok(())) => { tracing::info!( "Worker for queue {} shut down gracefully", - self.queue.name() + self.queue.queue_name() ); Ok(()) } Ok(Err(e)) => { tracing::error!( "Worker for queue {} shut down with error: {:?}", - self.queue.name(), + self.queue.queue_name(), e ); Err(e) @@ -44,7 +60,7 @@ impl WorkerHandle { Err(e) => { tracing::error!( "Worker task for queue {} panicked during shutdown: {:?}", - self.queue.name(), + self.queue.queue_name(), e ); Err(TwmqError::Runtime { message: format!("Worker panic: {}", e) }) @@ -69,7 +85,7 @@ impl ShutdownHandle { } /// Add a worker to be managed by this shutdown handle - pub fn add_worker(&mut self, worker: WorkerHandle) { + pub fn add_worker(&mut self, worker: WorkerHandle) { self.join_handles.push(worker.join_handle); self.shutdown_txs.push(worker.shutdown_tx); } @@ -133,16 +149,16 @@ impl Default for ShutdownHandle { // Convenience methods to make collecting workers easier impl ShutdownHandle { /// Create a new shutdown handle with a single worker - pub fn with_worker(worker: WorkerHandle) -> Self { + pub fn with_worker(worker: WorkerHandle) -> Self { let mut handle = Self::new(); handle.add_worker(worker); handle } /// Add multiple workers at once - pub fn add_workers( + pub fn add_workers( &mut self, - workers: impl IntoIterator>, + workers: impl IntoIterator>, ) { for worker in workers { self.add_worker(worker); @@ -150,7 +166,7 @@ impl ShutdownHandle { } /// Builder-style method to add a worker - pub fn and_worker(mut self, worker: WorkerHandle) -> Self { + pub fn and_worker(mut self, worker: WorkerHandle) -> Self { self.add_worker(worker); self } diff --git a/twmq/tests/multilane_batch_pop.rs b/twmq/tests/multilane_batch_pop.rs new file mode 100644 index 0000000..bc9e111 --- /dev/null +++ b/twmq/tests/multilane_batch_pop.rs @@ -0,0 +1,492 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; + +use serde::{Deserialize, Serialize}; +use tokio::time::timeout; + +use twmq::error::TwmqError; +use twmq::job::{BorrowedJob, JobResult}; +use 
twmq::{DurableExecution, MultilaneQueue, UserCancellable}; + +const REDIS_URL: &str = "redis://127.0.0.1:6379/"; + +// Simple test job that just holds an ID +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestJob { + id: u32, + data: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestOutput { + processed_id: u32, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestError { + message: String, +} + +impl From for TestError { + fn from(error: TwmqError) -> Self { + TestError { + message: error.to_string(), + } + } +} + +impl UserCancellable for TestError { + fn user_cancelled() -> Self { + TestError { + message: "Cancelled".to_string(), + } + } +} + +// Dummy handler - these tests focus on batch pop logic, not processing +struct DummyHandler; + +impl DurableExecution for DummyHandler { + type Output = TestOutput; + type ErrorData = TestError; + type JobData = TestJob; + + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + Ok(TestOutput { + processed_id: job.job.data.id, + }) + } +} + +/// Test harness for multilane queue batch operations +struct MultilaneTestHarness { + pub queue: Arc>, + pub queue_id: String, +} + +impl MultilaneTestHarness { + async fn new() -> Self { + let queue_id = format!("test_multilane_{}", nanoid::nanoid!(8)); + let handler = DummyHandler; + + let queue = Arc::new( + MultilaneQueue::new(REDIS_URL, &queue_id, None, handler) + .await + .expect("Failed to create multilane queue"), + ); + + // warm up redis connection + let _ = queue.count(twmq::job::JobStatus::Active, None).await; + + let harness = Self { queue, queue_id }; + harness.cleanup().await; + harness + } + + /// Clean up all Redis keys for this test + async fn cleanup(&self) { + let mut conn = self.queue.redis.clone(); + let keys_pattern = format!("twmq_multilane:{}:*", self.queue_id); + + let keys: Vec = redis::cmd("KEYS") + .arg(&keys_pattern) + .query_async(&mut conn) + .await + .unwrap_or_default(); + + if !keys.is_empty() { + redis::cmd("DEL") + .arg(keys) + .query_async::<()>(&mut conn) + .await + .unwrap_or_default(); + } + } + + /// Add jobs to specific lanes + async fn add_jobs_to_lanes(&self, jobs_per_lane: &HashMap>) { + for (lane_id, jobs) in jobs_per_lane { + for job in jobs { + self.queue + .clone() + .job_for_lane(lane_id, job.clone()) + .id(format!("job_{}_{}", lane_id, job.id)) + .push() + .await + .expect("Failed to push job"); + } + } + } + + /// Batch pop jobs and return the results grouped by lane + async fn batch_pop(&self, batch_size: usize) -> HashMap> { + let jobs = self + .queue + .pop_batch_jobs(batch_size) + .await + .expect("Failed to pop batch jobs"); + + let mut results = HashMap::new(); + for (lane_id, job) in jobs { + results + .entry(lane_id) + .or_insert_with(Vec::new) + .push(job.job.data.id); + } + results + } + + /// Count total jobs across all lanes by status + async fn count_total_jobs(&self, status: twmq::job::JobStatus) -> usize { + self.queue + .count(status, None) + .await + .expect("Failed to count jobs") + } + + /// Count jobs in specific lane by status + async fn count_lane_jobs(&self, lane_id: &str, status: twmq::job::JobStatus) -> usize { + self.queue + .count(status, Some(lane_id)) + .await + .expect("Failed to count lane jobs") + } +} + +impl Drop for MultilaneTestHarness { + fn drop(&mut self) { + // Cleanup in background since we can't await in drop + let queue_id = self.queue_id.clone(); + let redis = self.queue.clone().redis.clone(); + + tokio::spawn(async move { + let mut conn = redis; + let 
keys_pattern = format!("twmq_multilane:{}:*", queue_id); + let keys: Vec = redis::cmd("KEYS") + .arg(&keys_pattern) + .query_async(&mut conn) + .await + .unwrap_or_default(); + + if !keys.is_empty() { + redis::cmd("DEL") + .arg(keys) + .query_async::<()>(&mut conn) + .await + .unwrap_or_default(); + } + }); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn multilane_test_batch_pop_single_lane_with_100k_empty_lanes() { + // Test: 100,000 lanes, only 1 has 100 jobs, batch pop 100 + println!("🧪 Testing batch pop with 100,000 lanes, 1 active lane with 100 jobs"); + + let harness = MultilaneTestHarness::new().await; + + // Create jobs for the single active lane + let active_lane = "lane_active".to_string(); + let mut jobs_per_lane = HashMap::new(); + + let mut jobs = Vec::new(); + for i in 0..100 { + jobs.push(TestJob { + id: i, + data: format!("job_{}", i), + }); + } + jobs_per_lane.insert(active_lane.clone(), jobs); + + // Add 99,999 empty lanes by creating them in Redis lanes zset + // We do this by adding empty lanes to the zset directly + let mut conn = harness.queue.redis.clone(); + for i in 0..99_999 { + let lane_id = format!("empty_lane_{}", i); + // Add lane to lanes zset with score 0 (never processed) + redis::cmd("ZADD") + .arg(harness.queue.lanes_zset_name()) + .arg("NX") // Only add if not exists + .arg(0) + .arg(&lane_id) + .query_async::<()>(&mut conn) + .await + .expect("Failed to add empty lane"); + } + + // Add the actual jobs + harness.add_jobs_to_lanes(&jobs_per_lane).await; + + // Verify setup + let pending_count = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(pending_count, 100, "Should have 100 pending jobs"); + + let active_lane_count = harness + .count_lane_jobs(&active_lane, twmq::job::JobStatus::Pending) + .await; + assert_eq!( + active_lane_count, 100, + "Active lane should have 100 pending jobs" + ); + + // Test batch pop with timeout to ensure it doesn't hang + println!("⏱️ Executing batch pop (should complete quickly despite 100k lanes)..."); + let start = std::time::Instant::now(); + + let result = timeout(Duration::from_secs(10), harness.batch_pop(100)) + .await + .expect("Batch pop should complete within 10 seconds"); + + let duration = start.elapsed(); + println!("✅ Batch pop completed in {:?}", duration); + + // Verify results + assert_eq!(result.len(), 1, "Should get jobs from exactly 1 lane"); + assert!( + result.contains_key(&active_lane), + "Should get jobs from active lane" + ); + + let jobs_from_active = &result[&active_lane]; + assert_eq!(jobs_from_active.len(), 100, "Should get all 100 jobs"); + + // Verify all job IDs are present + let mut expected_ids: Vec = (0..100).collect(); + let mut actual_ids = jobs_from_active.clone(); + expected_ids.sort(); + actual_ids.sort(); + assert_eq!(actual_ids, expected_ids, "Should get all expected job IDs"); + + // Verify no jobs left pending + let remaining_pending = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(remaining_pending, 0, "Should have no pending jobs left"); + + // Performance assertion - should complete in reasonable time even with 100k lanes + assert!( + duration < Duration::from_secs(5), + "Should complete within 5 seconds even with 100k lanes" + ); + + println!("✅ Test passed: Single lane with 100k empty lanes"); +} + +#[tokio::test(flavor = "multi_thread")] +async fn multilane_test_batch_pop_distributed_jobs_across_100k_lanes() { + // Test: 200 jobs distributed randomly across 100,000 lanes, batch pop 100 three times 
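+    // Expected shape, given one job per active lane and round-robin scanning:
+    //   pop #1 -> 100 jobs from 100 distinct lanes (100 still pending)
+    //   pop #2 -> the remaining 100 jobs
+    //   pop #3 -> nothing; fully drained lanes are pruned from the lanes zset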
+ println!("🧪 Testing batch pop with 200 jobs distributed across 100,000 lanes"); + + let harness = MultilaneTestHarness::new().await; + + // Create 200 jobs distributed across 200 different lanes (1 job per lane) + let mut jobs_per_lane = HashMap::new(); + for i in 0..200 { + let lane_id = format!("lane_{}", i); + let job = TestJob { + id: i, + data: format!("job_{}", i), + }; + jobs_per_lane.insert(lane_id, vec![job]); + } + + // Add 99,800 empty lanes to reach 100,000 total + let mut conn = harness.queue.redis.clone(); + for i in 200..100_000 { + let lane_id = format!("empty_lane_{}", i); + redis::cmd("ZADD") + .arg(harness.queue.lanes_zset_name()) + .arg("NX") + .arg(0) + .arg(&lane_id) + .query_async::<()>(&mut conn) + .await + .expect("Failed to add empty lane"); + } + + // Add the jobs + harness.add_jobs_to_lanes(&jobs_per_lane).await; + + // Verify setup + let pending_count = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(pending_count, 200, "Should have 200 pending jobs"); + + // First batch pop - should get 100 jobs + println!("[200 jobs - 200/100k lanes] ⏱️ First batch pop (100 jobs)..."); + let start = std::time::Instant::now(); + let result1 = timeout(Duration::from_secs(10), harness.batch_pop(100)) + .await + .expect("First batch pop should complete within 10 seconds"); + let duration1 = start.elapsed(); + println!( + "[200 jobs - 200/100k lanes] ✅ First batch pop completed in {:?}", + duration1 + ); + + let new_lanes_count = harness.queue.lanes_count().await.unwrap(); + println!( + "[200 jobs - 200/100k lanes] New lanes count after initial batch pop: {}", + new_lanes_count + ); + + let total_jobs_1: usize = result1.values().map(|jobs| jobs.len()).sum(); + assert_eq!(total_jobs_1, 100, "First batch should return 100 jobs"); + + // Verify remaining pending jobs + let remaining_after_1 = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!( + remaining_after_1, 100, + "Should have 100 pending jobs after first batch" + ); + + // Second batch pop - should get 100 jobs + println!("[200 jobs - 200/100k lanes] ⏱️ Second batch pop (100 jobs)..."); + let start = std::time::Instant::now(); + let result2 = timeout(Duration::from_secs(10), harness.batch_pop(100)) + .await + .expect("Second batch pop should complete within 10 seconds"); + let duration2 = start.elapsed(); + println!( + "[200 jobs - 200/100k lanes] ✅ Second batch pop completed in {:?}", + duration2 + ); + + let total_jobs_2: usize = result2.values().map(|jobs| jobs.len()).sum(); + assert_eq!(total_jobs_2, 100, "Second batch should return 100 jobs"); + + // Verify no remaining pending jobs + let remaining_after_2 = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!( + remaining_after_2, 0, + "Should have 0 pending jobs after second batch" + ); + + // Third batch pop - should get 0 jobs + println!("⏱️ Third batch pop (should get 0 jobs)..."); + let start = std::time::Instant::now(); + let result3 = timeout(Duration::from_secs(10), harness.batch_pop(100)) + .await + .expect("Third batch pop should complete within 10 seconds"); + let duration3 = start.elapsed(); + println!("✅ Third batch pop completed in {:?}", duration3); + + let total_jobs_3: usize = result3.values().map(|jobs| jobs.len()).sum(); + assert_eq!(total_jobs_3, 0, "Third batch should return 0 jobs"); + + // Verify all unique job IDs were returned across both batches + let mut all_job_ids: Vec = Vec::new(); + for jobs in result1.values() { + all_job_ids.extend(jobs); + } 
+ for jobs in result2.values() { + all_job_ids.extend(jobs); + } + + all_job_ids.sort(); + let expected_ids: Vec = (0..200).collect(); + assert_eq!( + all_job_ids, expected_ids, + "Should get all 200 unique job IDs across two batches" + ); + + // Performance assertions + assert!( + duration1 < Duration::from_secs(5), + "First batch should complete quickly" + ); + assert!( + duration2 < Duration::from_secs(5), + "Second batch should complete quickly" + ); + assert!( + duration3 < Duration::from_secs(2), + "Third batch should complete very quickly (no jobs)" + ); + + println!("✅ Test passed: Distributed jobs across 100k lanes"); +} + +#[tokio::test(flavor = "multi_thread")] +async fn multilane_test_batch_pop_fairness_across_lanes() { + // Test fairness: ensure round-robin behavior across multiple lanes with jobs + println!("🧪 Testing batch pop fairness across multiple active lanes"); + + let harness = MultilaneTestHarness::new().await; + + // Create 10 lanes, each with 10 jobs (100 total) + let mut jobs_per_lane = HashMap::new(); + for lane_num in 0..10 { + let lane_id = format!("lane_{}", lane_num); + let mut jobs = Vec::new(); + for job_num in 0..10 { + jobs.push(TestJob { + id: lane_num * 10 + job_num, + data: format!("job_{}_{}", lane_num, job_num), + }); + } + jobs_per_lane.insert(lane_id, jobs); + } + + harness.add_jobs_to_lanes(&jobs_per_lane).await; + + // Batch pop 10 jobs - should get 1 from each lane (fairness) + let result = harness.batch_pop(10).await; + + assert_eq!(result.len(), 10, "Should get jobs from all 10 lanes"); + + for lane_num in 0..10 { + let lane_id = format!("lane_{}", lane_num); + assert!( + result.contains_key(&lane_id), + "Should have job from lane {}", + lane_num + ); + assert_eq!( + result[&lane_id].len(), + 1, + "Should get exactly 1 job from lane {}", + lane_num + ); + } + + // Verify remaining jobs + let remaining = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(remaining, 90, "Should have 90 jobs remaining"); + + println!("✅ Test passed: Fairness across multiple lanes"); +} + +#[tokio::test(flavor = "multi_thread")] +async fn multilane_test_batch_pop_empty_queue() { + // Edge case: batch pop from completely empty queue + println!("🧪 Testing batch pop from empty queue"); + + let harness = MultilaneTestHarness::new().await; + + // Don't add any jobs + let result = harness.batch_pop(100).await; + + assert_eq!(result.len(), 0, "Should get no jobs from empty queue"); + + let pending = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(pending, 0, "Should have no pending jobs"); + + println!("✅ Test passed: Empty queue handling"); +} From 64dbb84c7b2c42ccf9fd24a3e2da45798e88271c Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Thu, 3 Jul 2025 18:24:12 +0530 Subject: [PATCH 2/8] ci fixes + wip eoa execution refactor to single lane queue, non batch id --- .github/workflows/ci-twmq.yaml | 2 +- .github/workflows/coverage-twmq.yaml | 2 +- Cargo.lock | 1352 ++++++++++-------- Cargo.toml | 10 +- aa-core/Cargo.toml | 4 +- core/Cargo.toml | 4 +- core/src/signer.rs | 61 +- executors/Cargo.toml | 2 +- executors/src/eoa/confirm.rs | 592 ++++++++ executors/src/eoa/eoa_confirmation_worker.rs | 575 ++++++++ executors/src/eoa/error_classifier.rs | 304 ++++ executors/src/eoa/mod.rs | 19 + executors/src/eoa/nonce_manager.rs | 550 +++++++ executors/src/eoa/send.rs | 783 ++++++++++ executors/src/eoa/transaction_store.rs | 445 ++++++ executors/src/lib.rs | 3 +- 16 files changed, 4062 insertions(+), 646 
deletions(-) create mode 100644 executors/src/eoa/confirm.rs create mode 100644 executors/src/eoa/eoa_confirmation_worker.rs create mode 100644 executors/src/eoa/error_classifier.rs create mode 100644 executors/src/eoa/mod.rs create mode 100644 executors/src/eoa/nonce_manager.rs create mode 100644 executors/src/eoa/send.rs create mode 100644 executors/src/eoa/transaction_store.rs diff --git a/.github/workflows/ci-twmq.yaml b/.github/workflows/ci-twmq.yaml index 8aec294..cb8eab5 100644 --- a/.github/workflows/ci-twmq.yaml +++ b/.github/workflows/ci-twmq.yaml @@ -38,7 +38,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #checkout@v4 - name: Install CI dependencies - uses: taiki-e/install-action@ab3728c7ba6948b9b429627f4d55a68842b27f18 + uses: taiki-e/install-action@9185c192a96ba09167ad8663015b3fbbf007ec79 #@2.56.2 with: tool: cargo-nextest diff --git a/.github/workflows/coverage-twmq.yaml b/.github/workflows/coverage-twmq.yaml index 3f153fd..8c20654 100644 --- a/.github/workflows/coverage-twmq.yaml +++ b/.github/workflows/coverage-twmq.yaml @@ -38,7 +38,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #checkout@v4 - name: Install CI dependencies - uses: taiki-e/install-action@ab3728c7ba6948b9b429627f4d55a68842b27f18 + uses: taiki-e/install-action@9185c192a96ba09167ad8663015b3fbbf007ec79 #@2.56.2 with: tool: cargo-tarpaulin diff --git a/Cargo.lock b/Cargo.lock index 22d11cd..4440c46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -47,7 +47,7 @@ dependencies = [ "bytes", "cfg-if", "http 1.3.1", - "indexmap 2.9.0", + "indexmap 2.10.0", "schemars 0.8.22", "serde", "serde_json", @@ -66,7 +66,7 @@ checksum = "be8e0d4af7cc08353807aaf80722125a229bf2d67be7fe0b89163c648db3d223" dependencies = [ "darling", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -77,41 +77,41 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "0.15.10" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a0be470ab41e3aaed6c54dbb2b6224d3048de467d8009cf9d5d32a8b8957ef7" +checksum = "2b064bd1cea105e70557a258cd2b317731896753ec08edf51da2d1fced587b05" dependencies = [ - "alloy-consensus 0.15.8", + "alloy-consensus 0.15.11", "alloy-core", - "alloy-eips 0.15.8", - "alloy-serde 0.15.8", - "alloy-signer 0.15.8", - "alloy-signer-aws 0.15.8", - "alloy-signer-gcp 0.15.8", - "alloy-signer-ledger 0.15.8", + "alloy-eips 0.15.11", + "alloy-serde 0.15.11", + "alloy-signer 0.15.11", + "alloy-signer-aws 0.15.11", + "alloy-signer-gcp 0.15.11", + "alloy-signer-ledger 0.15.11", ] [[package]] name = "alloy" -version = "1.0.9" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0093d23bf026b580c1f66ed3a053d8209c104a446c5264d3ad99587f6edef24e" +checksum = "4e0d1aecf3cab3d0e7383064ce488616434b4ade10d8904dff422e74203c712f" dependencies = [ - "alloy-consensus 1.0.8", + "alloy-consensus 1.0.17", "alloy-contract", "alloy-core", - "alloy-eips 1.0.8", + "alloy-eips 1.0.17", "alloy-genesis", - "alloy-json-rpc 1.0.8", - "alloy-network 1.0.8", + "alloy-json-rpc 1.0.17", + "alloy-network 1.0.17", 
"alloy-provider", "alloy-rpc-client", "alloy-rpc-types", - "alloy-serde 1.0.8", - "alloy-signer 1.0.8", - "alloy-signer-aws 1.0.9", - "alloy-signer-gcp 1.0.9", - "alloy-signer-ledger 1.0.9", + "alloy-serde 1.0.17", + "alloy-signer 1.0.17", + "alloy-signer-aws 1.0.17", + "alloy-signer-gcp 1.0.17", + "alloy-signer-ledger 1.0.17", "alloy-signer-local", "alloy-transport", "alloy-transport-http", @@ -119,9 +119,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.2.0" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7734aecfc58a597dde036e4c5cace2ae43e2f8bf3d406b022a1ef34da178dd49" +checksum = "5674914c2cfdb866c21cb0c09d82374ee39a1395cf512e7515f4c014083b3fff" dependencies = [ "alloy-primitives", "num_enum", @@ -130,15 +130,15 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c103f18381c4f17b5691cbea7baa3bafa7da3bf9c9b7d90f94f48715c6cc054" +checksum = "32c3f3bc4f2a6b725970cd354e78e9738ea1e8961a91898f57bf6317970b1915" dependencies = [ - "alloy-eips 0.15.8", + "alloy-eips 0.15.11", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.15.8", - "alloy-trie", + "alloy-serde 0.15.11", + "alloy-trie 0.8.1", "auto_impl", "c-kzg", "derive_more", @@ -154,15 +154,16 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78090ff96d0d1b648dbcebc63b5305296b76ad4b5d4810f755d7d1224ced6247" +checksum = "e9c6ad411efe0f49e0e99b9c7d8749a1eb55f6dbf74a1bc6953ab285b02c4f67" dependencies = [ - "alloy-eips 1.0.8", + "alloy-eips 1.0.17", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.8", - "alloy-trie", + "alloy-serde 1.0.17", + "alloy-trie 0.9.0", + "alloy-tx-macros", "auto_impl", "c-kzg", "derive_more", @@ -178,58 +179,59 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c13988a8d23dfd00a49dc6702bc704000d34853951f23b9c125a342ee537443" +checksum = "dda014fb5591b8d8d24cab30f52690117d238e52254c6fb40658e91ea2ccd6c3" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-eips 0.15.8", + "alloy-consensus 0.15.11", + "alloy-eips 0.15.11", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.15.8", + "alloy-serde 0.15.11", "serde", ] [[package]] name = "alloy-consensus-any" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdfc3b2f202e3c6284685e6d3dcfbb532b39552d9e1021276e68e2389037616" +checksum = "0bf397edad57b696501702d5887e4e14d7d0bbae9fbb6439e148d361f7254f45" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-eips 1.0.8", + "alloy-consensus 1.0.17", + "alloy-eips 1.0.17", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.8", + "alloy-serde 1.0.17", "serde", ] [[package]] name = "alloy-contract" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4a5b6c7829e8aa048f5b23defa21706b675e68e612cf88d9f509771fecc806" +checksum = "977b97d271159578afcb26e39e1ca5ce1a7f937697793d7d571b0166dd8b8225" dependencies = [ - "alloy-consensus 1.0.8", + "alloy-consensus 1.0.17", "alloy-dyn-abi", "alloy-json-abi", - "alloy-network 1.0.8", - "alloy-network-primitives 1.0.8", + "alloy-network 1.0.17", + "alloy-network-primitives 1.0.17", "alloy-primitives", "alloy-provider", - 
"alloy-rpc-types-eth 1.0.8", + "alloy-rpc-types-eth 1.0.17", "alloy-sol-types", "alloy-transport", "futures", "futures-util", + "serde_json", "thiserror 2.0.12", ] [[package]] name = "alloy-core" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d020a85ae8cf79b9c897a86d617357817bbc9a7d159dd7e6fedf1bc90f64d35" +checksum = "ad31216895d27d307369daa1393f5850b50bbbd372478a9fa951c095c210627e" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -240,15 +242,14 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884a5d4560f7e5e34ec3c5e54a60223c56352677dd049b495fbb59384cf72a90" +checksum = "7b95b3deca680efc7e9cba781f1a1db352fa1ea50e6384a514944dcf4419e652" dependencies = [ "alloy-json-abi", "alloy-primitives", "alloy-sol-type-parser", "alloy-sol-types", - "const-hex", "derive_more", "itoa", "serde", @@ -294,16 +295,16 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86eba345a4e6ff5684cb98c0aedc69eca4db77cbca3a456e7930ab7d086febdd" +checksum = "2f7b2f7010581f29bcace81776cf2f0e022008d05a7d326884763f16f3044620" dependencies = [ "alloy-eip2124", "alloy-eip2930", "alloy-eip7702", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.15.8", + "alloy-serde 0.15.11", "auto_impl", "c-kzg", "derive_more", @@ -314,16 +315,16 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb7646210355c36b07886c91cac52e4727191e2b0ee1415cce8f953f6019dd2" +checksum = "749b8449e4daf7359bdf1dabdba6ce424ff8b1bdc23bdb795661b2e991a08d87" dependencies = [ "alloy-eip2124", "alloy-eip2930", "alloy-eip7702", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.8", + "alloy-serde 1.0.17", "auto_impl", "c-kzg", "derive_more", @@ -334,22 +335,23 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b14b506d7a4f739dd57ad5026d65eb64d842f4e971f71da5e9be5067ecbdc9" +checksum = "5fcbae2107f3f2df2b02bb7d9e81e8aa730ae371ca9dd7fd0c81c3d0cb78a452" dependencies = [ - "alloy-eips 1.0.8", + "alloy-eips 1.0.17", "alloy-primitives", - "alloy-serde 1.0.8", - "alloy-trie", + "alloy-serde 1.0.17", + "alloy-trie 0.9.0", "serde", + "serde_with", ] [[package]] name = "alloy-json-abi" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5189fa9a8797e92396bc4b4454c5f2073a4945f7c2b366af9af60f9536558f7a" +checksum = "15516116086325c157c18261d768a20677f0f699348000ed391d4ad0dcb82530" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -359,9 +361,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b3429d59feb5c72e1e5f92efd6f67def0f6a8de5cb610aea56c35eff1cf60d" +checksum = "ca1e31b50f4ed9a83689ae97263d366b15b935a67c4acb5dd46d5b1c3b27e8e6" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -373,12 +375,13 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a7ed339a633ba1a2af3eb9847dc90936d1b3c380a223cfca7a45be1713d8ab0" +checksum = "bc30b0e20fcd0843834ecad2a716661c7b9d5aca2486f8e96b93d5246eb83e06" dependencies = [ "alloy-primitives", "alloy-sol-types", + "http 1.3.1", "serde", "serde_json", "thiserror 2.0.12", @@ -387,20 +390,20 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb5acf3ae77e8e9775673d9f81f96c1f5596824c35b4f158c5f8eb30e4d565f" +checksum = "879afc0f4a528908c8fe6935b2ab0bc07f77221a989186f71583f7592831689e" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-consensus-any 0.15.8", - "alloy-eips 0.15.8", - "alloy-json-rpc 0.15.8", - "alloy-network-primitives 0.15.8", + "alloy-consensus 0.15.11", + "alloy-consensus-any 0.15.11", + "alloy-eips 0.15.11", + "alloy-json-rpc 0.15.11", + "alloy-network-primitives 0.15.11", "alloy-primitives", - "alloy-rpc-types-any 0.15.8", - "alloy-rpc-types-eth 0.15.8", - "alloy-serde 0.15.8", - "alloy-signer 0.15.8", + "alloy-rpc-types-any 0.15.11", + "alloy-rpc-types-eth 0.15.11", + "alloy-serde 0.15.11", + "alloy-signer 0.15.11", "alloy-sol-types", "async-trait", "auto_impl", @@ -413,20 +416,20 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "691a4825b3d08f031b49aae3c11cb35abf2af376fc11146bf8e5930a432dbf40" +checksum = "eaeb681024cf71f5ca14f3d812c0a8d8b49f13f7124713538e66d74d3bfe6aff" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-consensus-any 1.0.8", - "alloy-eips 1.0.8", - "alloy-json-rpc 1.0.8", - "alloy-network-primitives 1.0.8", + "alloy-consensus 1.0.17", + "alloy-consensus-any 1.0.17", + "alloy-eips 1.0.17", + "alloy-json-rpc 1.0.17", + "alloy-network-primitives 1.0.17", "alloy-primitives", - "alloy-rpc-types-any 1.0.8", - "alloy-rpc-types-eth 1.0.8", - "alloy-serde 1.0.8", - "alloy-signer 1.0.8", + "alloy-rpc-types-any 1.0.17", + "alloy-rpc-types-eth 1.0.17", + "alloy-serde 1.0.17", + "alloy-signer 1.0.17", "alloy-sol-types", "async-trait", "auto_impl", @@ -439,35 +442,35 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d770bdcca12fe2b3e973de3f17290065b42c6d9a10cbd9728877bbbbfb050b6" +checksum = "ec185bac9d32df79c1132558a450d48f6db0bfb5adef417dbb1a0258153f879b" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-eips 0.15.8", + "alloy-consensus 0.15.11", + "alloy-eips 0.15.11", "alloy-primitives", - "alloy-serde 0.15.8", + "alloy-serde 0.15.11", "serde", ] [[package]] name = "alloy-network-primitives" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5713f40f9cbe4428292d095e8bbb38af82e63ad4247418b7f6d6fb7ef2d9d68b" +checksum = "a03ad273e1c55cc481889b4130e82860e33624e6969e9a08854e0f3ebe659295" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-eips 1.0.8", + "alloy-consensus 1.0.17", + "alloy-eips 1.0.17", "alloy-primitives", - "alloy-serde 1.0.8", + "alloy-serde 1.0.17", "serde", ] [[package]] name = "alloy-primitives" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70b98b99c1dcfbe74d7f0b31433ff215e7d1555e367d90e62db904f3c9d4ff53" +checksum = "6177ed26655d4e84e00b65cb494d4e0b8830e7cae7ef5d63087d445a2600fb55" dependencies = [ "alloy-rlp", "bytes", @@ -475,8 +478,8 @@ 
dependencies = [ "const-hex", "derive_more", "foldhash", - "hashbrown 0.15.3", - "indexmap 2.9.0", + "hashbrown 0.15.4", + "indexmap 2.10.0", "itoa", "k256", "keccak-asm", @@ -492,20 +495,20 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1382ef9e0fa1ab3f5a3dbc0a0fa1193f3794d5c9d75fc22654bb6da1cf7a59cc" +checksum = "abc164acf8c41c756e76c7aea3be8f0fb03f8a3ef90a33e3ddcea5d1614d8779" dependencies = [ "alloy-chains", - "alloy-consensus 1.0.8", - "alloy-eips 1.0.8", - "alloy-json-rpc 1.0.8", - "alloy-network 1.0.8", - "alloy-network-primitives 1.0.8", + "alloy-consensus 1.0.17", + "alloy-eips 1.0.17", + "alloy-json-rpc 1.0.17", + "alloy-network 1.0.17", + "alloy-network-primitives 1.0.17", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types-eth 1.0.8", - "alloy-signer 1.0.8", + "alloy-rpc-types-eth 1.0.17", + "alloy-signer 1.0.17", "alloy-sol-types", "alloy-transport", "alloy-transport-http", @@ -516,6 +519,7 @@ dependencies = [ "either", "futures", "futures-utils-wasm", + "http 1.3.1", "lru", "parking_lot", "pin-project 1.1.10", @@ -531,9 +535,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -542,22 +546,22 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "alloy-rpc-client" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859ec46fb132175969a0101bdd2fe9ecd413c40feeb0383e98710a4a089cee77" +checksum = "03c44d31bcb9afad460915fe1fba004a2af5a07a3376c307b9bdfeec3678c209" dependencies = [ - "alloy-json-rpc 1.0.8", + "alloy-json-rpc 1.0.17", "alloy-primitives", "alloy-transport", "alloy-transport-http", @@ -578,53 +582,53 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f1512ec542339a72c263570644a56d685f20ce77be465fbd3f3f33fb772bcbd" +checksum = "2ba2cf3d3c6ece87f1c6bb88324a997f28cf0ad7e98d5e0b6fa91c4003c30916" dependencies = [ "alloy-primitives", - "alloy-rpc-types-eth 1.0.8", - "alloy-serde 1.0.8", + "alloy-rpc-types-eth 1.0.17", + "alloy-serde 1.0.17", "serde", ] [[package]] name = "alloy-rpc-types-any" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d1a67b833618d34929f343c764cd84b88fb76af48fe3f4581ac6919e453822f" +checksum = "7a5a8f1efd77116915dad61092f9ef9295accd0b0b251062390d9c4e81599344" dependencies = [ - "alloy-consensus-any 0.15.8", - "alloy-rpc-types-eth 0.15.8", - "alloy-serde 0.15.8", + "alloy-consensus-any 0.15.11", + "alloy-rpc-types-eth 0.15.11", + "alloy-serde 0.15.11", ] [[package]] name = "alloy-rpc-types-any" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d87236623aafabbf7196bcde37a4d626c3e56b3b22d787310e6d5ea25239c5d8" +checksum = "ef5b22062142ce3b2ed3374337d4b343437e5de6959397f55d2c9fe2c2ce0162" dependencies = [ - "alloy-consensus-any 1.0.8", - "alloy-rpc-types-eth 1.0.8", - "alloy-serde 1.0.8", + "alloy-consensus-any 1.0.17", + "alloy-rpc-types-eth 1.0.17", + "alloy-serde 1.0.17", ] [[package]] name = "alloy-rpc-types-eth" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08bc32276225c44bdc14b34ed244a1b4ccab0b4e39025bfc3dd60be5203af43" +checksum = "bc1323310d87f9d950fb3ff58d943fdf832f5e10e6f902f405c0eaa954ffbaf1" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-consensus-any 0.15.8", - "alloy-eips 0.15.8", - "alloy-network-primitives 0.15.8", + "alloy-consensus 0.15.11", + "alloy-consensus-any 0.15.11", + "alloy-eips 0.15.11", + "alloy-network-primitives 0.15.11", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.15.8", + "alloy-serde 0.15.11", "alloy-sol-types", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_json", "thiserror 2.0.12", @@ -632,19 +636,19 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b8acc64d23e484a0a27375b57caba34569729560a29aa366933f0ae07b7786f" +checksum = "391e59f81bacbffc7bddd2da3a26d6eec0e2058e9237c279e9b1052bdf21b49e" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-consensus-any 1.0.8", - "alloy-eips 1.0.8", - "alloy-network-primitives 1.0.8", + "alloy-consensus 1.0.17", + "alloy-consensus-any 1.0.17", + "alloy-eips 1.0.17", + "alloy-network-primitives 1.0.17", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.8", + "alloy-serde 1.0.17", "alloy-sol-types", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_json", "thiserror 2.0.12", @@ -652,9 +656,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f448b0d9ee32ef23565ac4dfb0ea7544b03c01571ee187c471337a6f1b3cb203" +checksum = "d05ace2ef3da874544c3ffacfd73261cdb1405d8631765deb991436a53ec6069" dependencies = [ "alloy-primitives", "serde", @@ -663,9 +667,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "114c287eb4595f1e0844800efb0860dd7228fcf9bc77d52e303fb7a43eb766b2" +checksum = "0ea08bc854235d4dff08fd57df8033285c11b8d7548b20c6da218194e7e6035f" dependencies = [ "alloy-primitives", "serde", @@ -674,9 +678,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07abd80b2db2606a4d4566b8d71440534c46160f6bd34cf029be145ecc20fd46" +checksum = "67fdabad99ad3c71384867374c60bcd311fc1bb90ea87f5f9c779fd8c7ec36aa" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -691,9 +695,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afebd60fa84d9ce793326941509d8f26ce7b383f2aabd7a42ba215c1b92ea96b" +checksum = "bcb3759f85ef5f010a874d9ebd5ee6ce01cac65211510863124e0ebac6552db0" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -708,14 +712,14 @@ dependencies = [ [[package]] name = "alloy-signer-aws" -version = "0.15.8" +version = "0.15.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a05eb43a52d6e685eed6dfbc8ba35520b1f6b29bbb97039607deba168c413b" +checksum = "a7469e93151629b7780d4d37b3adc463376e524af95f1fc1c2877f10837eb3e4" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-network 0.15.8", + "alloy-consensus 0.15.11", + "alloy-network 0.15.11", "alloy-primitives", - "alloy-signer 0.15.8", + "alloy-signer 0.15.11", "async-trait", "aws-sdk-kms", "k256", @@ -726,14 +730,14 @@ dependencies = [ [[package]] name = "alloy-signer-aws" -version = "1.0.9" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6be3d371299b62eac5aa459fa58e8d1c761aabdc637573ae258ab744457fcc88" +checksum = "7942b850ec7be43de89b2680321d7921b7620b25be53b9981aae6fb29daa9e97" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-network 1.0.8", + "alloy-consensus 1.0.17", + "alloy-network 1.0.17", "alloy-primitives", - "alloy-signer 1.0.8", + "alloy-signer 1.0.17", "async-trait", "aws-sdk-kms", "k256", @@ -744,14 +748,14 @@ dependencies = [ [[package]] name = "alloy-signer-gcp" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977892f64653934d5674260f145b968bacbc644423f30cf54c117d184a2e5e46" +checksum = "110f26e897e58e1fa50c5eeddd3731c9b69d231f20687844a1c031cacc176b15" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-network 0.15.8", + "alloy-consensus 0.15.11", + "alloy-network 0.15.11", "alloy-primitives", - "alloy-signer 0.15.8", + "alloy-signer 0.15.11", "async-trait", "gcloud-sdk", "k256", @@ -762,14 +766,14 @@ dependencies = [ [[package]] name = "alloy-signer-gcp" -version = "1.0.9" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df298e47bbb7d0a8e06b603046b91062c11ba70d22f8a6c9bab1c1468bd856d0" +checksum = "74809e45053bd43d24338e618202ebea68d5660aa9632d77b0244faa2dcaa9d1" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-network 1.0.8", + "alloy-consensus 1.0.17", + "alloy-network 1.0.17", "alloy-primitives", - "alloy-signer 1.0.8", + "alloy-signer 1.0.17", "async-trait", "gcloud-sdk", "k256", @@ -780,15 +784,15 @@ dependencies = [ [[package]] name = "alloy-signer-ledger" -version = "0.15.8" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cd6d983cdd49649aff1d20010e8b70f3f04fb0b1be9d82ae6500c4afeb8fbd5" +checksum = "a3abc91dcedcdd72f950179df239ca838deaf74fe9ec5cd818c9ba682360325a" dependencies = [ - "alloy-consensus 0.15.8", + "alloy-consensus 0.15.11", "alloy-dyn-abi", - "alloy-network 0.15.8", + "alloy-network 0.15.11", "alloy-primitives", - "alloy-signer 0.15.8", + "alloy-signer 0.15.11", "alloy-sol-types", "async-trait", "coins-ledger", @@ -800,15 +804,15 @@ dependencies = [ [[package]] name = "alloy-signer-ledger" -version = "1.0.9" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0e049299cc7e131a438a904f89a493bcea45cd92bbed3e50116a28bc27987c" +checksum = "63c7e67367bc2b1d5790236448d2402865a4f0bc2b53cfda06d71b7ba3dbdffd" dependencies = [ - "alloy-consensus 1.0.8", + "alloy-consensus 1.0.17", "alloy-dyn-abi", - "alloy-network 1.0.8", + "alloy-network 1.0.17", "alloy-primitives", - "alloy-signer 1.0.8", + "alloy-signer 1.0.17", "alloy-sol-types", "async-trait", "coins-ledger", @@ -820,14 +824,14 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.8" +version = "1.0.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f551042c11c4fa7cb8194d488250b8dc58035241c418d79f07980c4aee4fa5c9" +checksum = "14d95902d29e1290809e1c967a1e974145b44b78f6e3e12fc07a60c1225e3df0" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-network 1.0.8", + "alloy-consensus 1.0.17", + "alloy-network 1.0.17", "alloy-primitives", - "alloy-signer 1.0.8", + "alloy-signer 1.0.17", "async-trait", "k256", "rand 0.8.5", @@ -836,42 +840,42 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60fcfa26956bcb22f66ab13407115197f26ef23abca5b48d39a1946897382d74" +checksum = "a14f21d053aea4c6630687c2f4ad614bed4c81e14737a9b904798b24f30ea849" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "alloy-sol-macro-expander" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a9b402f0013f1ff8c24066eeafc2207a8e52810a2b18b77776ce7fead5af41" +checksum = "34d99282e7c9ef14eb62727981a985a01869e586d1dec729d3bb33679094c100" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.9.0", + "indexmap 2.10.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d02d61741337bb6b3f4899c2e3173fe17ffa2810e143d3b28acd953197c8dd79" +checksum = "eda029f955b78e493360ee1d7bd11e1ab9f2a220a5715449babc79d6d0a01105" dependencies = [ "alloy-json-abi", "const-hex", @@ -881,15 +885,15 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.101", + "syn 2.0.104", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "251273c5aa1abb590852f795c938730fa641832fc8fa77b5478ed1bf11b6097e" +checksum = "10db1bd7baa35bc8d4a1b07efbf734e73e5ba09f2580fb8cee3483a36087ceb2" dependencies = [ "serde", "winnow", @@ -897,24 +901,23 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02635bce18205ff8149fb752c753b0a91ea3f3c8ee04c58846448be4811a640" +checksum = "58377025a47d8b8426b3e4846a251f2c1991033b27f517aade368146f6ab1dfe" dependencies = [ "alloy-json-abi", "alloy-primitives", "alloy-sol-macro", - "const-hex", "serde", ] [[package]] name = "alloy-transport" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fb766c0bce9f62779a83048ca6d998c2ced4153d694027c66e537629f4fd61" +checksum = "dcdf4b7fc58ebb2605b2fc5a33dae5cf15527ea70476978351cc0db1c596ea93" dependencies = [ - "alloy-json-rpc 1.0.8", + "alloy-json-rpc 1.0.17", "alloy-primitives", "base64 0.22.1", "derive_more", @@ -933,11 +936,11 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "254bd59ca1abaf2da3e3201544a41924163b019414cce16f0dc6bc75d20c6612" +checksum = "4c4b0f3a9c28bcd3761504d9eb3578838d6d115c8959fc1ea05f59a3a8f691af" dependencies = [ - "alloy-json-rpc 1.0.8", + "alloy-json-rpc 
1.0.17", "alloy-transport", "reqwest", "serde_json", @@ -956,12 +959,41 @@ dependencies = [ "alloy-rlp", "arrayvec", "derive_more", - "nybbles", + "nybbles 0.3.4", + "serde", + "smallvec", + "tracing", +] + +[[package]] +name = "alloy-trie" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bada1fc392a33665de0dc50d401a3701b62583c655e3522a323490a5da016962" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more", + "nybbles 0.4.0", "serde", "smallvec", "tracing", ] +[[package]] +name = "alloy-tx-macros" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79bf2869e66904b2148c809e7a75e23ca26f5d7b46663a149a1444fb98a69d1d" +dependencies = [ + "alloy-primitives", + "darling", + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -985,9 +1017,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anyhow" @@ -1142,9 +1174,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.23" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" +checksum = "40f6024f3f856663b45fd0c9b6f2024034a702f453549449e0d84a305900dad4" dependencies = [ "flate2", "futures-core", @@ -1183,7 +1215,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1194,7 +1226,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1211,14 +1243,14 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-credential-types" @@ -1234,9 +1266,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.5.7" +version = "1.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c4063282c69991e57faab9e5cb21ae557e59f5b0fb285c196335243df8dc25c" +checksum = "4f6c68419d8ba16d9a7463671593c54f81ba58cab466e9b759418da606dcc2e2" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -1258,9 +1290,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.66.0" +version = "1.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655097cd83ab1f15575890943135192560f77097413c6dd1733fdbdc453e81ac" +checksum = "6cd57d0c1a5bd6c7eaa2b26462e046d5ca7b72189346718d2435dfc48bfa988b" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1274,16 +1306,15 @@ dependencies = [ "bytes", "fastrand", "http 0.2.12", - "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sigv4" -version = "1.3.1" +version = 
"1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3503af839bd8751d0bdc5a46b9cac93a003a353e635b0c12cf2376b5b53e41ea" +checksum = "ddfb9021f581b71870a17eac25b52335b82211cdc092e02b6876b2bcefa61666" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -1334,9 +1365,9 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.61.3" +version = "0.61.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92144e45819cae7dc62af23eac5a038a58aa544432d2102609654376a900bd07" +checksum = "a16e040799d29c17412943bdbf488fd75db04112d0c0d4b9290bacf5ae0014b9" dependencies = [ "aws-smithy-types", ] @@ -1375,9 +1406,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e5d9e3a80a18afa109391fb5ad09c3daf887b516c6fd805a157c6ea7994a57" +checksum = "bd8531b6d8882fd8f48f82a9754e682e29dd44cff27154af51fa3eb730f59efb" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1392,9 +1423,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40076bd09fadbc12d5e026ae080d0930defa606856186e31d83ccc6a255eeaf3" +checksum = "d498595448e43de7f4296b7b7a18a8a02c61ec9349128c80a368f7c3b4ab11a8" dependencies = [ "base64-simd", "bytes", @@ -1490,23 +1521,23 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "backon" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd0b50b1b78dbadd44ab18b3c794e496f3a139abb9fbc27d9c94c4eebbb96496" +checksum = "302eaff5357a264a2c42f127ecb8bac761cf99749fc3dc95677e2743991f99e7" dependencies = [ "fastrand", ] [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", "cfg-if", @@ -1547,9 +1578,9 @@ dependencies = [ [[package]] name = "base64ct" -version = "1.7.3" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bincode" @@ -1610,9 +1641,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" dependencies = [ "serde", ] @@ -1640,9 +1671,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c79a94619fade3c0b887670333513a67ac28a6a7e653eb260bf0d4103db38d" +checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" dependencies = [ "cc", "glob", @@ -1652,9 +1683,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byte-slice-cast" @@ -1710,18 +1741,18 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.21" +version = "1.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8691782945451c1c383942c4874dbe63814f61cb57ef773cda2972682b7bb3c0" +checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" dependencies = [ "shlex", ] [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -1808,18 +1839,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.38" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.5.38" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" dependencies = [ "anstyle", "clap_lex", @@ -1827,9 +1858,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "coins-ledger" @@ -1898,9 +1929,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.14.0" +version = "1.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" +checksum = "83e22e0ed40b96a48d3db274f72fd365bd78f67af39b6bbd47e8a15e1c6207ff" dependencies = [ "cfg-if", "cpufeatures", @@ -1976,9 +2007,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -2001,9 +2032,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" dependencies = [ "crc-catalog", ] @@ -2093,9 +2124,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = 
"460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-bigint" @@ -2143,7 +2174,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2167,7 +2198,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2178,7 +2209,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2244,7 +2275,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "unicode-xid", ] @@ -2277,7 +2308,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2359,21 +2390,21 @@ dependencies = [ name = "engine-aa-core" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy 1.0.17", "engine-aa-types", "engine-core", "serde", "tokio", "tracing", - "vault-sdk", - "vault-types", + "vault-sdk 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", + "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", ] [[package]] name = "engine-aa-types" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy 1.0.17", "schemars 0.8.22", "serde", "serde_json", @@ -2385,7 +2416,7 @@ dependencies = [ name = "engine-core" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy 1.0.17", "engine-aa-types", "schemars 0.8.22", "serde", @@ -2398,15 +2429,15 @@ dependencies = [ "twmq", "utoipa", "uuid", - "vault-sdk", - "vault-types", + "vault-sdk 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", + "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", ] [[package]] name = "engine-executors" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy 1.0.17", "chrono", "engine-aa-core", "engine-aa-types", @@ -2432,12 +2463,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -2519,9 +2550,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -2625,7 +2656,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2666,9 +2697,9 @@ checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" [[package]] name = "gcloud-sdk" -version = "0.27.0" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00c7dc8c1f6c0865d02a2d931f3a15ac919ef583077c5141fd9b8efa8b493c44" +checksum = 
"a3ec9c312db09dc0dac684dda2f18d76e9ce00effdd27fcaaa90fa811691cd6d" dependencies = [ "async-trait", "bytes", @@ -2726,15 +2757,15 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "js-sys", @@ -2769,9 +2800,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" dependencies = [ "atomic-waker", "bytes", @@ -2779,7 +2810,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.9.0", + "indexmap 2.10.0", "slab", "tokio", "tokio-util", @@ -2810,9 +2841,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ "allocator-api2", "equivalent", @@ -2826,7 +2857,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -2837,9 +2868,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.9" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -2980,11 +3011,10 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", "http 1.3.1", "hyper", "hyper-util", @@ -2994,7 +3024,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots 0.26.10", + "webpki-roots", ] [[package]] @@ -3028,9 +3058,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c293b6b3d21eca78250dc7dbebd6b9210ec5530e038cbfe0661b5c47ab06e8" +checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" dependencies = [ "base64 0.22.1", "bytes", @@ -3078,21 +3108,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + 
"potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -3101,31 +3132,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -3133,67 +3144,54 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", + "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -3213,9 +3211,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -3238,7 +3236,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3254,12 +3252,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "serde", ] @@ -3272,6 +3270,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "io-uring" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + [[package]] name = "ipnet" version = "2.11.0" @@ -3306,6 +3315,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.15" @@ -3389,15 +3407,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libm" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libusb1-sys" @@ -3419,15 +3437,15 @@ checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -3458,9 +3476,15 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "macro-string" version = "0.1.4" @@ -3469,7 +3493,7 @@ checksum = 
"1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3489,9 +3513,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memoffset" @@ -3520,22 +3544,22 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", ] [[package]] @@ -3646,9 +3670,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ "hermit-abi", "libc", @@ -3656,22 +3680,23 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", + "rustversion", ] [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3687,6 +3712,19 @@ dependencies = [ "smallvec", ] +[[package]] +name = "nybbles" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d51b0175c49668a033fe7cc69080110d9833b291566cdf332905f3ad9c68a0" +dependencies = [ + "alloy-rlp", + "proptest", + "ruint", + "serde", + "smallvec", +] + [[package]] name = "object" version = "0.36.7" @@ -3716,11 +3754,11 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.72" +version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cfg-if", "foreign-types", "libc", @@ -3737,7 +3775,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3748,9 +3786,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.108" +version = "0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -3782,9 +3820,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parity-scale-codec" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" dependencies = [ "arrayvec", "bitvec", @@ -3798,14 +3836,14 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3816,9 +3854,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -3826,9 +3864,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -3876,9 +3914,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" dependencies = [ "memchr", "thiserror 2.0.12", @@ -3887,9 +3925,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d725d9cfd79e87dccc9341a2ef39d1b6f6353d68c4b33c177febbe1a402c97c5" +checksum = "bb056d9e8ea77922845ec74a1c4e8fb17e7c218cc4fc11a15c5d25e189aa40bc" dependencies = [ "pest", "pest_generator", @@ -3897,24 +3935,23 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7d01726be8ab66ab32f9df467ae8b1148906685bbe75c82d1e65d7f5b3f841" +checksum = "87e404e638f781eb3202dc82db6760c8ae8a1eeef7fb3fa8264b2ef280504966" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "pest_meta" -version = "2.8.0" +version = "2.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9f832470494906d1fca5329f8ab5791cc60beb230c74815dff541cbd2b5ca0" +checksum = "edd1101f170f5903fde0914f899bb503d9ff5271d7ba76bbb70bea63690cc0d5" dependencies = [ - "once_cell", "pest", "sha2", ] @@ -3956,7 +3993,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4028,9 +4065,18 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "potential_utf" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] [[package]] name = "powerfmt" @@ -4086,7 +4132,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4100,17 +4146,17 @@ dependencies = [ [[package]] name = "proptest" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.9.0", + "bitflags 2.9.1", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand 0.9.1", + "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -4135,10 +4181,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4158,9 +4204,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" -version = "0.11.7" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" dependencies = [ "bytes", "cfg_aliases", @@ -4178,12 +4224,13 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcbafbbdbb0f638fe3f35f3c56739f77a8a1d070cb25603226c83339b391472b" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" dependencies = [ "bytes", - "getrandom 0.3.2", + "getrandom 0.3.3", + "lru-slab", "rand 0.9.1", "ring", "rustc-hash", @@ -4198,9 +4245,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" dependencies = [ "cfg_aliases", "libc", @@ -4221,9 +4268,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "radium" @@ -4289,17 +4336,17 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "serde", ] [[package]] name = "rand_xorshift" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.6.4", + "rand_core 0.9.3", ] [[package]] @@ -4349,11 +4396,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -4373,7 +4420,7 @@ checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4428,9 +4475,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.18" +version = "0.12.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff6b0dbbe4d5a37318f433d4fc82babd21631f194d370409ceb2e40b2f0b5" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" dependencies = [ "async-compression", "base64 0.22.1", @@ -4446,13 +4493,11 @@ dependencies = [ "hyper-rustls", "hyper-tls", "hyper-util", - "ipnet", "js-sys", "log", "mime", "mime_guess", "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", "quinn", @@ -4475,7 +4520,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.0", + "webpki-roots", ] [[package]] @@ -4519,16 +4564,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.7", - "bitflags 2.9.0", + "bitflags 2.9.1", "serde", "serde_derive", ] [[package]] name = "ruint" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a46eb779843b2c4f21fac5773e25d6d5b7c8f0922876c91541790d2ca27eef" +checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", @@ -4569,20 +4614,19 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e310ef0e1b6eeb79169a1171daf9abcb87a2e17c03bee2c4bb100b55c75409f" +checksum = "e7295b7ce3bf4806b419dc3420745998b447178b7005e2011947b38fc5aa6791" dependencies = [ "cfg-if", "ordered-multimap", - "trim-in-place", ] [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" [[package]] name = "rustc-hash" @@ -4620,7 
+4664,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys", @@ -4629,9 +4673,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.26" +version = "0.23.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" +checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "log", "once_cell", @@ -4656,18 +4700,19 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ "web-time", + "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.103.1" +version = "0.103.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" dependencies = [ "ring", "rustls-pki-types", @@ -4676,9 +4721,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "rusty-fork" @@ -4723,7 +4768,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" dependencies = [ "dyn-clone", - "indexmap 2.9.0", + "indexmap 2.10.0", "schemars_derive", "serde", "serde_json", @@ -4741,6 +4786,18 @@ dependencies = [ "serde_json", ] +[[package]] +name = "schemars" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "schemars_derive" version = "0.8.22" @@ -4750,7 +4807,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4820,7 +4877,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -4833,8 +4890,8 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.9.0", - "core-foundation 0.10.0", + "bitflags 2.9.1", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -4900,7 +4957,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4911,7 +4968,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] 
[[package]] @@ -4951,9 +5008,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -4972,16 +5029,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.9.0", + "indexmap 2.10.0", "schemars 0.9.0", + "schemars 1.0.3", "serde", "serde_derive", "serde_json", @@ -4991,14 +5049,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5096,27 +5154,24 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "serde", ] [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -5169,7 +5224,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5191,9 +5246,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -5202,14 +5257,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0f0d4760f4c2a0823063b2c70e97aa2ad185f57be195172ccc0e23c4b787c4" +checksum = "b9ac494e7266fcdd2ad80bf4375d55d27a117ea5c866c26d0e97fe5b3caeeb75" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5229,7 +5284,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5238,7 +5293,7 @@ version = "0.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -5267,12 +5322,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.19.1" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", - "getrandom 0.3.2", + "getrandom 0.3.3", "once_cell", "rustix", "windows-sys 0.59.0", @@ -5282,7 +5337,7 @@ dependencies = [ name = "thirdweb-core" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy 1.0.17", "engine-aa-types", "moka", "reqwest", @@ -5300,7 +5355,7 @@ name = "thirdweb-engine" version = "0.1.0" dependencies = [ "aide", - "alloy 1.0.9", + "alloy 1.0.17", "anyhow", "axum", "config", @@ -5322,8 +5377,8 @@ dependencies = [ "utoipa", "utoipa-axum", "utoipa-scalar", - "vault-sdk", - "vault-types", + "vault-sdk 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main)", + "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main)", ] [[package]] @@ -5352,7 +5407,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5363,17 +5418,16 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -5427,9 +5481,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -5462,17 +5516,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "1140bb80481756a8cbe10541f37433b459c5aa1e727b4c020fbfebdc25bf3ec4" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", + "slab", "socket2", "tokio-macros", "windows-sys 0.52.0", @@ -5486,7 +5542,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5536,9 +5592,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +checksum = 
"dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", @@ -5548,20 +5604,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "serde", "serde_spanned", "toml_datetime", @@ -5570,9 +5626,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85839f0b32fd242bb3209262371d07feda6d780d16ee9d2bc88581b89da1549b" +checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" dependencies = [ "async-trait", "axum", @@ -5607,7 +5663,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "pin-project-lite", "slab", "sync_wrapper", @@ -5620,11 +5676,11 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "bytes", "futures-util", "http 1.3.1", @@ -5675,20 +5731,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -5748,12 +5804,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trim-in-place" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc" - [[package]] name = "try-lock" version = "0.2.5" @@ -5865,12 +5915,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -5883,7 +5927,7 @@ version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fcc29c80c21c31608227e0912b2d7fddba57ad76b606890627ba8ee7964e993" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "serde", "serde_json", 
"utoipa-gen", @@ -5911,7 +5955,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.101", + "syn 2.0.104", "uuid", ] @@ -5933,7 +5977,7 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "js-sys", "serde", "wasm-bindgen", @@ -5950,7 +5994,28 @@ name = "vault-sdk" version = "0.1.0" source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main#a9d62a85ae69d47b2f341e886d16c12611644235" dependencies = [ - "alloy 0.15.10", + "alloy 0.15.11", + "chacha20poly1305", + "chrono", + "hex", + "hkdf", + "jsonwebtoken", + "reqwest", + "serde", + "serde_json", + "sha2", + "thiserror 2.0.12", + "uuid", + "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main)", + "x25519-dalek", +] + +[[package]] +name = "vault-sdk" +version = "0.1.0" +source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy#b0a72f93335ff05f722c070f32f0697c5478243a" +dependencies = [ + "alloy 1.0.17", "chacha20poly1305", "chrono", "hex", @@ -5962,7 +6027,7 @@ dependencies = [ "sha2", "thiserror 2.0.12", "uuid", - "vault-types", + "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", "x25519-dalek", ] @@ -5971,7 +6036,20 @@ name = "vault-types" version = "0.1.0" source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main#a9d62a85ae69d47b2f341e886d16c12611644235" dependencies = [ - "alloy 0.15.10", + "alloy 0.15.11", + "bincode", + "chrono", + "serde", + "serde_json", + "uuid", +] + +[[package]] +name = "vault-types" +version = "0.1.0" +source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy#b0a72f93335ff05f722c070f32f0697c5478243a" +dependencies = [ + "alloy 1.0.17", "bincode", "chrono", "serde", @@ -6033,9 +6111,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -6068,7 +6146,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-shared", ] @@ -6103,7 +6181,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6132,9 +6210,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +checksum = "d8d49b5d6c64e8558d9b1b065014426f35c18de636895d24893dbbd329743446" dependencies = [ "futures", "js-sys", @@ -6166,18 +6244,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37493cadf42a2a939ed404698ded7fb378bf301b5011f973361779a3a74f8c93" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "webpki-roots" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" +checksum = 
"8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" dependencies = [ "rustls-pki-types", ] @@ -6215,9 +6284,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.61.1" +version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", "windows-core", @@ -6237,25 +6306,26 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.61.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.0", + "windows-strings", ] [[package]] name = "windows-future" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1d6bbefcb7b60acd19828e1bc965da6fcf18a7e39490c5f8be71e54a19ba32" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" dependencies = [ "windows-core", "windows-link", + "windows-threading", ] [[package]] @@ -6266,7 +6336,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6277,14 +6347,14 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-numerics" @@ -6298,38 +6368,29 @@ dependencies = [ [[package]] name = "windows-registry" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" dependencies = [ + "windows-link", "windows-result", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-strings", ] [[package]] name = "windows-result" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-strings" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] @@ -6352,6 +6413,15 @@ dependencies = [ 
"windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -6370,9 +6440,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" dependencies = [ "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", @@ -6384,6 +6454,15 @@ dependencies = [ "windows_x86_64_msvc 0.53.0", ] +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -6482,9 +6561,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.9" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9fb597c990f03753e08d3c29efbfcf2019a003b4bf4ba19225c158e1549f0f3" +checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" dependencies = [ "memchr", ] @@ -6495,20 +6574,14 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "wyz" @@ -6533,9 +6606,9 @@ dependencies = [ [[package]] name = "yaml-rust2" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818913695e83ece1f8d2a1c52d54484b7b46d0f9c06beeb2649b9da50d9b512d" +checksum = "4ce2a4ff45552406d02501cea6c18d8a7e50228e7736a872951fe2fe75c91be7" dependencies = [ "arraydeque", "encoding_rs", @@ -6544,9 +6617,9 @@ dependencies = [ [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", @@ -6556,34 +6629,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.25" 
+version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6603,7 +6676,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] @@ -6624,14 +6697,25 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", ] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ "yoke", "zerofrom", @@ -6640,11 +6724,11 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] diff --git a/Cargo.toml b/Cargo.toml index 32fe580..05f3fc6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,11 @@ [workspace] -members = ["aa-types", "aa-core", "core", "executors", "server", "thirdweb-core", "twmq"] +members = [ + "aa-types", + "aa-core", + "core", + "executors", + "server", + "thirdweb-core", + "twmq", +] resolver = "2" diff --git a/aa-core/Cargo.toml b/aa-core/Cargo.toml index 8644cc7..dcd8702 100644 --- a/aa-core/Cargo.toml +++ b/aa-core/Cargo.toml @@ -8,7 +8,7 @@ alloy = { version = "1.0.8", features = ["serde"] } tokio = "1.44.2" engine-aa-types = { path = "../aa-types" } engine-core = { path = "../core" } -vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } -vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } +vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } +vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } serde = "1.0.219" tracing = "0.1.41" diff --git a/core/Cargo.toml b/core/Cargo.toml index 9945f42..51d0ea2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -10,8 +10,8 @@ schemars = "0.8.22" serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" thiserror = "2.0.12" -vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } -vault-sdk = { 
version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } +vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } +vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } tower = "0.5.2" tracing = "0.1.41" twmq = { version = "0.1.0", path = "../twmq" } diff --git a/core/src/signer.rs b/core/src/signer.rs index d978dee..8905f41 100644 --- a/core/src/signer.rs +++ b/core/src/signer.rs @@ -1,4 +1,5 @@ use alloy::{ + consensus::TypedTransaction, dyn_abi::TypedData, hex::FromHex, primitives::{Address, Bytes, ChainId}, @@ -176,6 +177,14 @@ pub trait AccountSigner { typed_data: &TypedData, credentials: SigningCredential, ) -> impl std::future::Future> + Send; + + /// Sign a transaction + fn sign_transaction( + &self, + options: Self::SigningOptions, + transaction: TypedTransaction, + credentials: SigningCredential, + ) -> impl std::future::Future> + Send; } /// EOA signer implementation @@ -188,7 +197,10 @@ pub struct EoaSigner { impl EoaSigner { /// Create a new EOA signer pub fn new(vault_client: VaultClient, iaw_client: IAWClient) -> Self { - Self { vault_client, iaw_client } + Self { + vault_client, + iaw_client, + } } } @@ -221,7 +233,10 @@ impl AccountSigner for EoaSigner { Ok(vault_result.signature) } - SigningCredential::Iaw { auth_token, thirdweb_auth } => { + SigningCredential::Iaw { + auth_token, + thirdweb_auth, + } => { // Convert MessageFormat to IAW MessageFormat let iaw_format = match format { MessageFormat::Text => thirdweb_core::iaw::MessageFormat::Text, @@ -268,7 +283,10 @@ impl AccountSigner for EoaSigner { Ok(vault_result.signature) } - SigningCredential::Iaw { auth_token, thirdweb_auth } => { + SigningCredential::Iaw { + auth_token, + thirdweb_auth, + } => { let iaw_result = self .iaw_client .sign_typed_data( @@ -287,6 +305,43 @@ impl AccountSigner for EoaSigner { } } } + + async fn sign_transaction( + &self, + options: EoaSigningOptions, + transaction: TypedTransaction, + credentials: SigningCredential, + ) -> Result { + match credentials { + SigningCredential::Vault(auth_method) => { + let vault_result = self + .vault_client + .sign_transaction(auth_method.clone(), transaction, options.from) + .await + .map_err(|e| { + tracing::error!("Error signing transaction with EOA (Vault): {:?}", e); + e + })?; + + Ok(vault_result.signature) + } + SigningCredential::Iaw { + auth_token, + thirdweb_auth, + } => { + let iaw_result = self + .iaw_client + .sign_transaction(auth_token.clone(), thirdweb_auth.clone(), transaction) + .await + .map_err(|e| { + tracing::error!("Error signing transaction with EOA (IAW): {:?}", e); + EngineError::from(e) + })?; + + Ok(iaw_result.signature) + } + } + } } /// Parameters for signing a message (used in routes) diff --git a/executors/Cargo.toml b/executors/Cargo.toml index 21efa2e..ecd9b04 100644 --- a/executors/Cargo.toml +++ b/executors/Cargo.toml @@ -19,4 +19,4 @@ engine-core = { version = "0.1.0", path = "../core" } engine-aa-core = { version = "0.1.0", path = "../aa-core" } rand = "0.9.1" uuid = { version = "1.17.0", features = ["v4"] } -chrono = "0.4.41" +chrono = "0.4.41" \ No newline at end of file diff --git a/executors/src/eoa/confirm.rs b/executors/src/eoa/confirm.rs new file mode 100644 index 0000000..8eefb79 --- /dev/null +++ b/executors/src/eoa/confirm.rs @@ -0,0 +1,592 @@ +use alloy::primitives::{Address, B256, U256}; +use alloy::providers::Provider; +use engine_core::{ + chain::{Chain, 
ChainService, RpcCredentials}, + error::{AlloyRpcErrorToEngineError, EngineError, RpcErrorKind}, + execution_options::WebhookOptions, +}; +use serde::{Deserialize, Serialize}; +use std::{sync::Arc, time::Duration}; +use twmq::{ + FailHookData, NackHookData, Queue, SuccessHookData, UserCancellable, + error::TwmqError, + hooks::TransactionContext, + job::{BorrowedJob, DelayOptions, JobResult, RequeuePosition, ToJobError, ToJobResult}, +}; + +use crate::{ + transaction_registry::TransactionRegistry, + webhook::{ + WebhookJobHandler, + envelope::{ExecutorStage, HasTransactionMetadata, HasWebhookOptions, WebhookCapable}, + }, +}; + +use super::{ + nonce_manager::NonceManager, + send::{EoaSendHandler, EoaSendJobData}, +}; + +// --- Job Payload --- +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaConfirmationJobData { + pub transaction_id: String, + pub chain_id: u64, + pub eoa_address: Address, + pub nonce: U256, + pub transaction_hash: B256, + pub webhook_options: Option>, + pub rpc_credentials: RpcCredentials, +} + +impl HasWebhookOptions for EoaConfirmationJobData { + fn webhook_options(&self) -> Option> { + self.webhook_options.clone() + } +} + +impl HasTransactionMetadata for EoaConfirmationJobData { + fn transaction_id(&self) -> String { + self.transaction_id.clone() + } +} + +// --- Success Result --- +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaConfirmationResult { + pub transaction_hash: B256, + pub nonce_confirmed: U256, + pub block_number: U256, + pub block_hash: B256, + pub gas_used: U256, + pub effective_gas_price: U256, + pub status: bool, +} + +// --- Error Types --- +#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] +pub enum EoaConfirmationError { + #[error("Chain service error for chainId {chain_id}: {message}")] + ChainServiceError { chain_id: u64, message: String }, + + #[error("Transaction not yet confirmed")] + NotYetConfirmed, + + #[error("Transaction was replaced by another transaction")] + TransactionReplaced { + original_hash: B256, + replacing_hash: Option, + nonce: U256, + }, + + #[error("Transaction failed with revert")] + TransactionReverted { + transaction_hash: B256, + revert_reason: Option, + }, + + #[error("RPC error during confirmation: {message}")] + RpcError { message: String, retryable: bool }, + + #[error("Nonce conflict detected - multiple transactions for nonce {nonce}")] + NonceConflict { + nonce: U256, + competing_hashes: Vec, + }, + + #[error("Invalid RPC Credentials: {message}")] + InvalidRpcCredentials { message: String }, + + #[error("Internal error: {message}")] + InternalError { message: String }, + + #[error("Transaction cancelled by user")] + UserCancelled, +} + +impl From for EoaConfirmationError { + fn from(error: TwmqError) -> Self { + EoaConfirmationError::InternalError { + message: format!("Queue error: {}", error), + } + } +} + +impl UserCancellable for EoaConfirmationError { + fn user_cancelled() -> Self { + EoaConfirmationError::UserCancelled + } +} + +// --- Handler --- +pub struct EoaConfirmationHandler +where + CS: ChainService + Send + Sync + 'static, +{ + pub chain_service: Arc, + pub nonce_manager: Arc, + pub webhook_queue: Arc>, + pub send_queue: Arc>>, + pub transaction_registry: Arc, + pub max_quick_checks: u32, +} + +impl ExecutorStage for EoaConfirmationHandler +where + CS: ChainService + Send + Sync + 'static, +{ + fn executor_name() -> 
&'static str { + "eoa" + } + + fn stage_name() -> &'static str { + "confirm" + } +} + +impl WebhookCapable for EoaConfirmationHandler +where + CS: ChainService + Send + Sync + 'static, +{ + fn webhook_queue(&self) -> &Arc> { + &self.webhook_queue + } +} + +impl twmq::DurableExecution for EoaConfirmationHandler +where + CS: ChainService + Send + Sync + 'static, +{ + type Output = EoaConfirmationResult; + type ErrorData = EoaConfirmationError; + type JobData = EoaConfirmationJobData; + + #[tracing::instrument(skip(self, job), fields(transaction_id = job.job.id, stage = Self::stage_name(), executor = Self::executor_name()))] + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + let job_data = &job.job.data; + + // 1. Get Chain + let chain = self + .chain_service + .get_chain(job_data.chain_id) + .map_err(|e| EoaConfirmationError::ChainServiceError { + chain_id: job_data.chain_id, + message: format!("Failed to get chain instance: {}", e), + }) + .map_err_fail()?; + + let chain_auth_headers = job_data + .rpc_credentials + .to_header_map() + .map_err(|e| EoaConfirmationError::InvalidRpcCredentials { + message: e.to_string(), + }) + .map_err_fail()?; + + let chain = chain.with_new_default_headers(chain_auth_headers); + + // 2. Get current onchain nonce + let onchain_nonce = match chain + .provider() + .get_transaction_count(job_data.eoa_address) + .await + { + Ok(nonce) => nonce, + Err(e) => { + let engine_error = e.to_engine_error(&chain); + return Err(EoaConfirmationError::RpcError { + message: format!("Failed to get transaction count: {}", engine_error), + retryable: true, + } + .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)); + } + }; + + // 3. Get cached nonce to detect progression + let last_known_nonce = self + .nonce_manager + .get_cached_onchain_nonce(job_data.eoa_address, job_data.chain_id) + .await + .map_err(|e| EoaConfirmationError::InternalError { + message: format!("Failed to get cached nonce: {}", e), + }) + .map_err_fail()? + .unwrap_or(U256::ZERO); + + tracing::debug!( + nonce = %job_data.nonce, + onchain_nonce = %onchain_nonce, + last_known_nonce = %last_known_nonce, + "Checking confirmation status" + ); + + // 4. If nonce hasn't moved, check specific transaction + if onchain_nonce <= last_known_nonce { + return self.check_specific_transaction(job, &chain).await; + } + + // 5. Nonce moved! Check all in-flight transactions for this range + self.process_nonce_progression(job, &chain, last_known_nonce, onchain_nonce) + .await + } + + async fn on_success( + &self, + job: &BorrowedJob, + success_data: SuccessHookData<'_, Self::Output>, + tx: &mut TransactionContext<'_>, + ) { + // 1. Remove nonce assignment (transaction confirmed) + self.nonce_manager.add_remove_assignment_command( + tx.pipeline(), + job.job.data.eoa_address, + job.job.data.chain_id, + success_data.result.nonce_confirmed, + ); + + // 2. Update cached onchain nonce + let cached_nonce = success_data.result.nonce_confirmed + U256::from(1); + // TODO: Add method to update cached nonce via pipeline + + // 3. Remove from transaction registry (completed) + self.transaction_registry + .add_remove_command(tx.pipeline(), &job.job.data.transaction_id); + + // 4. 
Reset error counters on success + // TODO: Update health to reset consecutive errors + + if let Err(e) = self.queue_success_webhook(job, success_data, tx) { + tracing::error!( + transaction_id = %job.job.data.transaction_id, + error = %e, + "Failed to queue success webhook" + ); + } + } + + async fn on_nack( + &self, + job: &BorrowedJob, + nack_data: NackHookData<'_, Self::ErrorData>, + tx: &mut TransactionContext<'_>, + ) { + if let Err(e) = self.queue_nack_webhook(job, nack_data, tx) { + tracing::error!( + transaction_id = %job.job.data.transaction_id, + error = %e, + "Failed to queue nack webhook" + ); + } + } + + async fn on_fail( + &self, + job: &BorrowedJob, + fail_data: FailHookData<'_, Self::ErrorData>, + tx: &mut TransactionContext<'_>, + ) { + // Handle different failure types + match fail_data.error { + EoaConfirmationError::TransactionReplaced { nonce, .. } => { + // Transaction was replaced - requeue the original transaction + self.requeue_replaced_transaction(job, *nonce, tx).await; + } + EoaConfirmationError::TransactionReverted { .. } => { + // Transaction reverted - remove nonce assignment and don't requeue + self.nonce_manager.add_remove_assignment_command( + tx.pipeline(), + job.job.data.eoa_address, + job.job.data.chain_id, + job.job.data.nonce, + ); + } + _ => { + // Other failures - remove nonce assignment + self.nonce_manager.add_remove_assignment_command( + tx.pipeline(), + job.job.data.eoa_address, + job.job.data.chain_id, + job.job.data.nonce, + ); + } + } + + // Remove from transaction registry + self.transaction_registry + .add_remove_command(tx.pipeline(), &job.job.data.transaction_id); + + if let Err(e) = self.queue_fail_webhook(job, fail_data, tx) { + tracing::error!( + transaction_id = %job.job.data.transaction_id, + error = %e, + "Failed to queue fail webhook" + ); + } + } +} + +// --- Confirmation Logic --- + +impl EoaConfirmationHandler +where + CS: ChainService + Send + Sync + 'static, +{ + async fn check_specific_transaction( + &self, + job: &BorrowedJob, + chain: &impl Chain, + ) -> JobResult { + let job_data = &job.job.data; + + // Poll for specific transaction receipt + match chain + .provider() + .get_transaction_receipt(job_data.transaction_hash) + .await + { + Ok(Some(receipt)) => { + // Found receipt - check if transaction succeeded + if receipt.status() { + tracing::info!( + transaction_hash = %job_data.transaction_hash, + block_number = %receipt.block_number.unwrap_or_default(), + "Transaction confirmed successfully" + ); + + Ok(EoaConfirmationResult { + transaction_hash: job_data.transaction_hash, + nonce_confirmed: job_data.nonce, + block_number: receipt.block_number.unwrap_or_default(), + block_hash: receipt.block_hash.unwrap_or_default(), + gas_used: receipt.gas_used, + effective_gas_price: receipt.effective_gas_price.unwrap_or_default(), + status: true, + }) + } else { + // Transaction reverted + Err(EoaConfirmationError::TransactionReverted { + transaction_hash: job_data.transaction_hash, + revert_reason: None, // Could extract from logs if needed + } + .fail()) + } + } + Ok(None) => { + // No receipt yet + if job.job.attempts > self.max_quick_checks { + // After max quick checks, switch to slow path (longer delays) + Err(EoaConfirmationError::NotYetConfirmed + .nack(Some(Duration::from_secs(30)), RequeuePosition::Last)) + } else { + // Quick recheck + Err(EoaConfirmationError::NotYetConfirmed + .nack(Some(Duration::from_secs(3)), RequeuePosition::Last)) + } + } + Err(e) => { + let engine_error = e.to_engine_error(chain); + let retryable 
= matches!(engine_error, + EngineError::RpcError { kind: RpcErrorKind::OtherTransportError(_), .. } | + EngineError::RpcError { kind: RpcErrorKind::ErrorResp(resp), .. } if matches!(resp.code, -32005 | -32603) + ); + + if retryable { + Err(EoaConfirmationError::RpcError { + message: format!("RPC error getting receipt: {}", engine_error), + retryable: true, + } + .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)) + } else { + Err(EoaConfirmationError::RpcError { + message: format!("Failed to get receipt: {}", engine_error), + retryable: false, + } + .fail()) + } + } + } + } + + async fn process_nonce_progression( + &self, + job: &BorrowedJob, + chain: &impl Chain, + last_known_nonce: U256, + onchain_nonce: U256, + ) -> JobResult { + let job_data = &job.job.data; + + tracing::info!( + from_nonce = %last_known_nonce, + to_nonce = %onchain_nonce, + "Processing nonce progression" + ); + + // Check all nonces from last_known to current onchain + for nonce_to_check in last_known_nonce.to::()..onchain_nonce.to::() { + let nonce_u256 = U256::from(nonce_to_check); + + // Get all assignments for this nonce + let assignments = self + .nonce_manager + .get_nonce_assignments(job_data.eoa_address, job_data.chain_id, nonce_u256) + .await + .map_err(|e| EoaConfirmationError::InternalError { + message: format!("Failed to get nonce assignments: {}", e), + }) + .map_err_fail()?; + + for assignment in assignments { + match chain + .provider() + .get_transaction_receipt(assignment.transaction_hash) + .await + { + Ok(Some(receipt)) => { + // Found a receipt for this nonce + if assignment.transaction_id == job_data.transaction_id { + // This job won the race! + if receipt.status() { + tracing::info!( + transaction_hash = %assignment.transaction_hash, + nonce = %nonce_u256, + "Transaction confirmed in nonce progression" + ); + + // Update cached nonce + if let Err(e) = self + .nonce_manager + .update_cached_onchain_nonce( + job_data.eoa_address, + job_data.chain_id, + onchain_nonce, + ) + .await + { + tracing::error!("Failed to update cached nonce: {}", e); + } + + return Ok(EoaConfirmationResult { + transaction_hash: assignment.transaction_hash, + nonce_confirmed: nonce_u256, + block_number: receipt.block_number.unwrap_or_default(), + block_hash: receipt.block_hash.unwrap_or_default(), + gas_used: receipt.gas_used, + effective_gas_price: receipt + .effective_gas_price + .unwrap_or_default(), + status: true, + }); + } else { + // Transaction reverted + return Err(EoaConfirmationError::TransactionReverted { + transaction_hash: assignment.transaction_hash, + revert_reason: None, + } + .fail()); + } + } else { + // Different transaction won - will handle in requeue + tracing::info!( + winning_hash = %assignment.transaction_hash, + losing_transaction_id = %job_data.transaction_id, + nonce = %nonce_u256, + "Different transaction won nonce race" + ); + } + } + Ok(None) => { + // No receipt for this hash yet + if nonce_u256 < onchain_nonce - U256::from(1) { + // Old nonce with no receipt - transaction was likely replaced + tracing::warn!( + transaction_hash = %assignment.transaction_hash, + nonce = %nonce_u256, + "Old transaction with no receipt - likely replaced" + ); + } + } + Err(e) => { + tracing::error!( + transaction_hash = %assignment.transaction_hash, + nonce = %nonce_u256, + error = %e, + "Error getting receipt during nonce progression" + ); + } + } + } + } + + // Update cached nonce regardless + if let Err(e) = self + .nonce_manager + .update_cached_onchain_nonce(job_data.eoa_address, 
job_data.chain_id, onchain_nonce) + .await + { + tracing::error!("Failed to update cached nonce: {}", e); + } + + // If we get here, our transaction wasn't found in the confirmed range + if job_data.nonce < onchain_nonce { + // Our nonce is old and we didn't find a receipt - transaction was replaced + Err(EoaConfirmationError::TransactionReplaced { + original_hash: job_data.transaction_hash, + replacing_hash: None, + nonce: job_data.nonce, + } + .fail()) + } else { + // Our nonce is current or future - keep waiting + Err(EoaConfirmationError::NotYetConfirmed + .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)) + } + } + + async fn requeue_replaced_transaction( + &self, + job: &BorrowedJob, + nonce: U256, + tx: &mut TransactionContext<'_>, + ) { + tracing::info!( + transaction_id = %job.job.data.transaction_id, + nonce = %nonce, + "Requeuing replaced transaction" + ); + + // Create a new send job without assigned nonce (will get new nonce) + let requeue_job = self + .send_queue + .clone() + .job(EoaSendJobData { + transaction_id: job.job.data.transaction_id.clone(), + chain_id: job.job.data.chain_id, + from: job.job.data.eoa_address, + to: Address::ZERO, // TODO: Get original transaction details + value: U256::ZERO, + data: Default::default(), + gas_limit: None, + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + assigned_nonce: None, // Will get new nonce + webhook_options: job.job.data.webhook_options.clone(), + rpc_credentials: job.job.data.rpc_credentials.clone(), + }) + .with_id(&format!("{}_retry", job.job.data.transaction_id)); + + if let Err(e) = tx.queue_job(requeue_job) { + tracing::error!( + transaction_id = %job.job.data.transaction_id, + error = %e, + "Failed to requeue replaced transaction" + ); + } + } +} diff --git a/executors/src/eoa/eoa_confirmation_worker.rs b/executors/src/eoa/eoa_confirmation_worker.rs new file mode 100644 index 0000000..a8ebd4c --- /dev/null +++ b/executors/src/eoa/eoa_confirmation_worker.rs @@ -0,0 +1,575 @@ +use alloy::primitives::{Address, B256, U256}; +use alloy::providers::Provider; +use alloy::rpc::types::{BlockNumberOrTag, TransactionReceipt}; +use alloy::transports::{RpcError, TransportErrorKind}; +use engine_core::{ + chain::{Chain, ChainService, RpcCredentials}, + error::{AlloyRpcErrorToEngineError, EngineError, RpcErrorKind}, + execution_options::WebhookOptions, +}; +use serde::{Deserialize, Serialize}; +use std::{sync::Arc, time::Duration}; +use twmq::{ + FailHookData, NackHookData, Queue, SuccessHookData, UserCancellable, + error::TwmqError, + hooks::TransactionContext, + job::{BorrowedJob, DelayOptions, JobResult, RequeuePosition, ToJobError, ToJobResult}, +}; + +use crate::{ + transaction_registry::TransactionRegistry, + webhook::{ + WebhookJobHandler, + envelope::{ExecutorStage, HasTransactionMetadata, HasWebhookOptions, WebhookCapable}, + }, +}; + +use super::{ + nonce_manager::NonceManager, + send::{EoaSendHandler, EoaSendJobData}, + transaction_store::{ActiveAttempt, ConfirmationData, TransactionStore}, +}; + +// --- Job Payload --- +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaConfirmationWorkerJobData { + pub eoa: Address, + pub chain_id: u64, + pub rpc_credentials: RpcCredentials, +} + +// --- Success Result --- +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaConfirmationWorkerResult { + pub eoa: Address, + pub chain_id: u64, + pub nonce_progression: Option<(u64, u64)>, // (from, to) + pub 
transactions_confirmed: u32, + pub transactions_requeued: u32, +} + +// --- Error Types --- +#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] +pub enum EoaConfirmationWorkerError { + #[error("Chain service error for chainId {chain_id}: {message}")] + ChainServiceError { chain_id: u64, message: String }, + + #[error("RPC error: {message}")] + RpcError { message: String, retryable: bool }, + + #[error("Active transactions found - will continue monitoring")] + ActiveTransactionsFound { count: u32 }, + + #[error("Sync required for EOA {eoa}")] + SyncRequired { eoa: Address }, + + #[error("Invalid RPC Credentials: {message}")] + InvalidRpcCredentials { message: String }, + + #[error("Internal error: {message}")] + InternalError { message: String }, + + #[error("Worker cancelled by user")] + UserCancelled, +} + +impl From for EoaConfirmationWorkerError { + fn from(error: TwmqError) -> Self { + EoaConfirmationWorkerError::InternalError { + message: format!("Queue error: {}", error), + } + } +} + +impl UserCancellable for EoaConfirmationWorkerError { + fn user_cancelled() -> Self { + EoaConfirmationWorkerError::UserCancelled + } +} + +// --- Handler --- +pub struct EoaConfirmationWorker +where + CS: ChainService + Send + Sync + 'static, +{ + pub chain_service: Arc, + pub nonce_manager: Arc, + pub transaction_store: Arc, + pub send_queue: Arc>>, + pub webhook_queue: Arc>, + pub transaction_registry: Arc, +} + +impl ExecutorStage for EoaConfirmationWorker +where + CS: ChainService + Send + Sync + 'static, +{ + fn executor_name() -> &'static str { + "eoa" + } + + fn stage_name() -> &'static str { + "confirmation_worker" + } +} + +impl WebhookCapable for EoaConfirmationWorker +where + CS: ChainService + Send + Sync + 'static, +{ + fn webhook_queue(&self) -> &Arc> { + &self.webhook_queue + } +} + +impl twmq::DurableExecution for EoaConfirmationWorker +where + CS: ChainService + Send + Sync + 'static, +{ + type Output = EoaConfirmationWorkerResult; + type ErrorData = EoaConfirmationWorkerError; + type JobData = EoaConfirmationWorkerJobData; + + #[tracing::instrument(skip(self, job), fields(eoa = %job.job.data.eoa, chain_id = job.job.data.chain_id, stage = Self::stage_name(), executor = Self::executor_name()))] + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + let job_data = &job.job.data; + + // 1. Get Chain + let chain = self + .chain_service + .get_chain(job_data.chain_id) + .map_err(|e| EoaConfirmationWorkerError::ChainServiceError { + chain_id: job_data.chain_id, + message: format!("Failed to get chain instance: {}", e), + }) + .map_err_fail()?; + + let chain_auth_headers = job_data + .rpc_credentials + .to_header_map() + .map_err(|e| EoaConfirmationWorkerError::InvalidRpcCredentials { + message: e.to_string(), + }) + .map_err_fail()?; + + let chain = chain.with_new_default_headers(chain_auth_headers); + + // 2. Get current onchain nonce + let onchain_nonce = match chain.provider().get_transaction_count(job_data.eoa).await { + Ok(nonce) => nonce, + Err(e) => { + let engine_error = e.to_engine_error(&chain); + return Err(EoaConfirmationWorkerError::RpcError { + message: format!("Failed to get transaction count: {}", engine_error), + retryable: true, + } + .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)); + } + }; + + // 3. 
Get cached onchain nonce to detect progression + let cached_nonce = self + .nonce_manager + .get_cached_onchain_nonce(job_data.eoa, job_data.chain_id) + .await + .map_err(|e| EoaConfirmationWorkerError::InternalError { + message: format!("Failed to get cached nonce: {}", e), + }) + .map_err_fail()? + .map(|n| n.to::()) + .unwrap_or(0); + + tracing::debug!( + eoa = %job_data.eoa, + onchain_nonce = %onchain_nonce, + cached_nonce = %cached_nonce, + "Checking nonce progression" + ); + + // 4. Check if nonce has progressed + let nonce_progression = if onchain_nonce > cached_nonce { + Some((cached_nonce, onchain_nonce)) + } else { + None + }; + + // 5. Get all active transactions for this EOA + let active_transaction_ids = self + .transaction_store + .get_active_transactions(job_data.eoa, job_data.chain_id) + .await + .map_err(|e| EoaConfirmationWorkerError::InternalError { + message: format!("Failed to get active transactions: {}", e), + }) + .map_err_fail()?; + + // 6. If no active transactions and no progression, we're done + if active_transaction_ids.is_empty() && nonce_progression.is_none() { + tracing::debug!( + eoa = %job_data.eoa, + "No active transactions and no nonce progression - stopping worker" + ); + + return Ok(EoaConfirmationWorkerResult { + eoa: job_data.eoa, + chain_id: job_data.chain_id, + nonce_progression: None, + transactions_confirmed: 0, + transactions_requeued: 0, + }); + } + + // 7. Process any nonce progression + let mut transactions_confirmed = 0; + let mut transactions_requeued = 0; + + if let Some((from_nonce, to_nonce)) = nonce_progression { + tracing::info!( + eoa = %job_data.eoa, + from_nonce = %from_nonce, + to_nonce = %to_nonce, + "Processing nonce progression" + ); + + let (confirmed, requeued) = self + .process_nonce_progression(job_data, &chain, from_nonce, to_nonce) + .await + .map_err_fail()?; + + transactions_confirmed += confirmed; + transactions_requeued += requeued; + + // Update cached nonce + if let Err(e) = self + .nonce_manager + .update_cached_onchain_nonce(job_data.eoa, job_data.chain_id, onchain_nonce) + .await + { + tracing::error!( + eoa = %job_data.eoa, + error = %e, + "Failed to update cached nonce" + ); + } + } + + // 8. Check if we still have active transactions - if so, requeue worker + let remaining_active = self + .transaction_store + .get_active_transactions(job_data.eoa, job_data.chain_id) + .await + .map_err(|e| EoaConfirmationWorkerError::InternalError { + message: format!("Failed to get remaining active transactions: {}", e), + }) + .map_err_fail()?; + + if !remaining_active.is_empty() { + tracing::debug!( + eoa = %job_data.eoa, + active_count = remaining_active.len(), + "Active transactions found - requeuing worker" + ); + + return Err(EoaConfirmationWorkerError::ActiveTransactionsFound { + count: remaining_active.len() as u32, + } + .nack(Some(Duration::from_secs(3)), RequeuePosition::Last)); + } + + // 9. 
No more active transactions - worker can complete + Ok(EoaConfirmationWorkerResult { + eoa: job_data.eoa, + chain_id: job_data.chain_id, + nonce_progression, + transactions_confirmed, + transactions_requeued, + }) + } + + async fn on_success( + &self, + job: &BorrowedJob, + success_data: SuccessHookData<'_, Self::Output>, + tx: &mut TransactionContext<'_>, + ) { + tracing::info!( + eoa = %job.job.data.eoa, + chain_id = job.job.data.chain_id, + transactions_confirmed = success_data.result.transactions_confirmed, + transactions_requeued = success_data.result.transactions_requeued, + "EOA confirmation worker completed" + ); + } + + async fn on_nack( + &self, + job: &BorrowedJob, + nack_data: NackHookData<'_, Self::ErrorData>, + tx: &mut TransactionContext<'_>, + ) { + tracing::debug!( + eoa = %job.job.data.eoa, + chain_id = job.job.data.chain_id, + error = ?nack_data.error, + "EOA confirmation worker nacked - will retry" + ); + } + + async fn on_fail( + &self, + job: &BorrowedJob, + fail_data: FailHookData<'_, Self::ErrorData>, + tx: &mut TransactionContext<'_>, + ) { + tracing::error!( + eoa = %job.job.data.eoa, + chain_id = job.job.data.chain_id, + error = ?fail_data.error, + "EOA confirmation worker failed permanently" + ); + } +} + +// --- Core Logic --- + +impl EoaConfirmationWorker +where + CS: ChainService + Send + Sync + 'static, +{ + /// Process nonce progression and determine winners/losers + async fn process_nonce_progression( + &self, + job_data: &EoaConfirmationWorkerJobData, + chain: &impl Chain, + from_nonce: u64, + to_nonce: u64, + ) -> Result<(u32, u32), EoaConfirmationWorkerError> { + let mut transactions_confirmed = 0; + let mut transactions_requeued = 0; + + // Process each nonce from cached to current onchain + for nonce in from_nonce..to_nonce { + let nonce_u256 = U256::from(nonce); + + // Get all our transactions competing for this nonce + let competing_transaction_ids = self + .transaction_store + .get_transactions_by_nonce(job_data.eoa, job_data.chain_id, nonce_u256) + .await + .map_err(|e| EoaConfirmationWorkerError::InternalError { + message: format!("Failed to get transactions by nonce {}: {}", nonce, e), + })?; + + if competing_transaction_ids.is_empty() { + tracing::debug!( + nonce = %nonce, + "No competing transactions for nonce - chain progressed without us" + ); + continue; + } + + tracing::debug!( + nonce = %nonce, + competing_count = competing_transaction_ids.len(), + "Processing competing transactions for nonce" + ); + + // Check each competing transaction + let mut found_winner = false; + for transaction_id in &competing_transaction_ids { + if let Some(attempt) = self + .transaction_store + .get_active_attempt(transaction_id) + .await + .map_err(|e| EoaConfirmationWorkerError::InternalError { + message: format!( + "Failed to get active attempt for {}: {}", + transaction_id, e + ), + })? + { + // Query receipt by hash + match chain + .provider() + .get_transaction_receipt(attempt.transaction_hash) + .await + { + Ok(Some(receipt)) => { + if receipt.status() { + // This transaction won! 
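+                                        // receipt.status() is true only for a mined-and-successful
+                                        // transaction. A mined-but-reverted transaction takes the
+                                        // else branch below, and a nonce that progressed with no
+                                        // receipt for any of our attempts leaves found_winner false,
+                                        // so the loser-requeue pass after this loop picks it up.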
+ tracing::info!( + transaction_id = %transaction_id, + transaction_hash = %attempt.transaction_hash, + nonce = %nonce, + block_number = %receipt.block_number.unwrap_or_default(), + "Transaction confirmed on-chain" + ); + + let confirmation_data = ConfirmationData { + transaction_hash: attempt.transaction_hash, + receipt, + confirmed_at: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + }; + + if let Err(e) = self + .transaction_store + .mark_transaction_confirmed(transaction_id, &confirmation_data) + .await + { + tracing::error!( + transaction_id = %transaction_id, + error = %e, + "Failed to mark transaction as confirmed" + ); + } + + transactions_confirmed += 1; + found_winner = true; + } else { + // Transaction reverted + tracing::warn!( + transaction_id = %transaction_id, + transaction_hash = %attempt.transaction_hash, + nonce = %nonce, + "Transaction reverted on-chain" + ); + + if let Err(e) = self + .transaction_store + .mark_transaction_failed(transaction_id, "Transaction reverted") + .await + { + tracing::error!( + transaction_id = %transaction_id, + error = %e, + "Failed to mark transaction as failed" + ); + } + } + } + Ok(None) => { + // No receipt - transaction might still be pending or was replaced + tracing::debug!( + transaction_id = %transaction_id, + transaction_hash = %attempt.transaction_hash, + nonce = %nonce, + "No receipt found for transaction" + ); + } + Err(e) => { + tracing::error!( + transaction_id = %transaction_id, + transaction_hash = %attempt.transaction_hash, + nonce = %nonce, + error = %e, + "Error getting receipt for transaction" + ); + } + } + } + } + + // If nonce progressed but none of our transactions won, they all lost + if !found_winner { + for transaction_id in &competing_transaction_ids { + tracing::info!( + transaction_id = %transaction_id, + nonce = %nonce, + "Transaction lost nonce race - requeuing" + ); + + // Remove active attempt and requeue as new send job + if let Err(e) = self + .transaction_store + .remove_active_attempt(transaction_id) + .await + { + tracing::error!( + transaction_id = %transaction_id, + error = %e, + "Failed to remove active attempt for requeue" + ); + continue; + } + + // Create new send job (attempt_number will be incremented) + if let Err(e) = self.requeue_transaction(job_data, transaction_id).await { + tracing::error!( + transaction_id = %transaction_id, + error = %e, + "Failed to requeue transaction" + ); + } else { + transactions_requeued += 1; + } + } + } + } + + Ok((transactions_confirmed, transactions_requeued)) + } + + /// Requeue a transaction that lost a nonce race + async fn requeue_transaction( + &self, + job_data: &EoaConfirmationWorkerJobData, + transaction_id: &str, + ) -> Result<(), EoaConfirmationWorkerError> { + // Get original transaction data + let tx_data = self + .transaction_store + .get_transaction_data(transaction_id) + .await + .map_err(|e| EoaConfirmationWorkerError::InternalError { + message: format!( + "Failed to get transaction data for {}: {}", + transaction_id, e + ), + })? 
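+            // get_transaction_data returns Ok(None) when the record is missing,
+            // so a vanished entry is surfaced as an internal error here rather
+            // than silently skipping the requeue.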
+ .ok_or_else(|| EoaConfirmationWorkerError::InternalError { + message: format!("Transaction data not found for {}", transaction_id), + })?; + + // Get current attempt number for new queue job ID + let mut conn = self.transaction_store.redis.clone(); + let counter_key = self.transaction_store.attempt_counter_key(transaction_id); + let attempt_number: u32 = conn.get(&counter_key).await.unwrap_or(0); + + // Create new send job with incremented attempt + let requeue_job = self + .send_queue + .clone() + .job(EoaSendJobData { + transaction_id: tx_data.transaction_id.clone(), + chain_id: tx_data.chain_id, + from: tx_data.eoa, + to: tx_data.to, + value: tx_data.value, + data: tx_data.data.into(), + webhook_options: None, // TODO: Get from original job if needed + assigned_nonce: None, // Will get new nonce + gas_limit: tx_data.gas_limit, + signing_credential: Default::default(), // TODO: Get from original job + rpc_credentials: job_data.rpc_credentials.clone(), + }) + .with_id(&format!("{}_{}", transaction_id, attempt_number)); + + // Queue the job (this would normally be done in a pipeline in the actual hook) + tracing::info!( + transaction_id = %transaction_id, + queue_job_id = %format!("{}_{}", transaction_id, attempt_number), + "Requeuing transaction after race loss" + ); + + Ok(()) + } +} diff --git a/executors/src/eoa/error_classifier.rs b/executors/src/eoa/error_classifier.rs new file mode 100644 index 0000000..6ef2fa6 --- /dev/null +++ b/executors/src/eoa/error_classifier.rs @@ -0,0 +1,304 @@ +use alloy::transports::{RpcError, TransportErrorKind}; +use engine_core::{ + chain::Chain, + error::{AlloyRpcErrorToEngineError, EngineError, RpcErrorKind}, +}; +use std::time::Duration; +use twmq::job::RequeuePosition; + +/// Domain-specific EOA execution errors mapped from RPC errors +#[derive(Debug, Clone)] +pub enum EoaExecutionError { + /// Nonce too low - transaction might already be in mempool + NonceTooLow { message: String }, + + /// Nonce too high - indicates nonce gap or desync + NonceTooHigh { message: String }, + + /// Transaction already known in mempool + AlreadyKnown { message: String }, + + /// Replacement transaction underpriced + ReplacementUnderpriced { message: String }, + + /// Insufficient funds for transaction + InsufficientFunds { message: String }, + + /// Gas-related error (limit, estimation, etc.) 
+ GasError { message: String }, + + /// Transaction pool is full or has limits + PoolLimitExceeded { message: String }, + + /// Account does not exist or invalid + AccountError { message: String }, + + /// Network/connectivity issues - use existing handling + RpcError { + message: String, + inner_error: Option, + }, +} + +/// Recovery strategy for an EOA execution error +#[derive(Debug, Clone, PartialEq)] +pub struct RecoveryStrategy { + /// Should we queue confirmation job + pub queue_confirmation: bool, + /// Should we recycle the nonce + pub recycle_nonce: bool, + /// Should we trigger a resync + pub needs_resync: bool, + /// Is this error retryable + pub retryable: bool, + /// Retry delay if retryable + pub retry_delay: Option, +} + +/// Maps RPC errors to domain-specific EOA errors and determines recovery strategies +pub struct EoaErrorMapper; + +impl EoaErrorMapper { + /// Map an RPC error from transaction sending - only handle actionable errors + pub fn map_send_error( + error: &RpcError, + chain: &C, + ) -> Result { + match error { + RpcError::ErrorResp(error_payload) => { + Ok(Self::map_ethereum_error(error_payload.code, &error_payload.message)) + } + _ => { + // Use existing engine error handling for non-actionable errors + Err(error.to_engine_error(chain)) + } + } + } + + /// Map Ethereum-specific errors that we need to act on + fn map_ethereum_error(code: i64, message: &str) -> EoaExecutionError { + let msg_lower = message.to_lowercase(); + + match code { + -32000 => { + // Only handle the specific ethereum errors we care about + if msg_lower.contains("nonce too low") { + EoaExecutionError::NonceTooLow { + message: message.to_string(), + } + } else if msg_lower.contains("nonce too high") { + EoaExecutionError::NonceTooHigh { + message: message.to_string(), + } + } else if msg_lower.contains("already known") || msg_lower.contains("duplicate") { + EoaExecutionError::AlreadyKnown { + message: message.to_string(), + } + } else if msg_lower.contains("replacement") && msg_lower.contains("underpriced") { + EoaExecutionError::ReplacementUnderpriced { + message: message.to_string(), + } + } else if msg_lower.contains("insufficient funds") { + EoaExecutionError::InsufficientFunds { + message: message.to_string(), + } + } else if msg_lower.contains("gas") { + EoaExecutionError::GasError { + message: message.to_string(), + } + } else if msg_lower.contains("txpool") || msg_lower.contains("pool limit") { + EoaExecutionError::PoolLimitExceeded { + message: message.to_string(), + } + } else if msg_lower.contains("account") { + EoaExecutionError::AccountError { + message: message.to_string(), + } + } else { + // Not an actionable error - let engine error handle it + EoaExecutionError::RpcError { + message: message.to_string(), + inner_error: Some(EngineError::InternalError { message: message.to_string() }), + } + } + } + _ => { + // Not an actionable error code + EoaExecutionError::RpcError { + message: format!("RPC error code {}: {}", code, message), + inner_error: Some(EngineError::InternalError { message: message.to_string() }), + } + } + } + } + + /// Determine recovery strategy for an EOA execution error + pub fn get_recovery_strategy(error: &EoaExecutionError) -> RecoveryStrategy { + match error { + EoaExecutionError::NonceTooLow { .. } => RecoveryStrategy { + queue_confirmation: true, + recycle_nonce: false, + needs_resync: false, + retryable: false, + retry_delay: None, + }, + + EoaExecutionError::NonceTooHigh { .. 
} => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: true, + retryable: true, + retry_delay: Some(Duration::from_secs(10)), + }, + + EoaExecutionError::AlreadyKnown { .. } => RecoveryStrategy { + queue_confirmation: true, + recycle_nonce: false, + needs_resync: false, + retryable: false, + retry_delay: None, + }, + + EoaExecutionError::ReplacementUnderpriced { .. } => RecoveryStrategy { + queue_confirmation: true, + recycle_nonce: false, + needs_resync: false, + retryable: true, + retry_delay: Some(Duration::from_secs(10)), + }, + + EoaExecutionError::InsufficientFunds { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: false, + retryable: true, + retry_delay: Some(Duration::from_secs(60)), + }, + + EoaExecutionError::GasError { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: false, + retryable: true, + retry_delay: Some(Duration::from_secs(30)), + }, + + EoaExecutionError::PoolLimitExceeded { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: false, + retryable: true, + retry_delay: Some(Duration::from_secs(30)), + }, + + EoaExecutionError::AccountError { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: false, + retryable: false, + retry_delay: None, + }, + + EoaExecutionError::RpcError { .. } => { + // This should not be used - let engine error handle it + RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: false, + needs_resync: false, + retryable: false, + retry_delay: None, + } + }, + } + } +} + +/// Helper for converting mapped errors and recovery strategies to job results +impl EoaExecutionError { + /// Get the message for this error + pub fn message(&self) -> &str { + match self { + EoaExecutionError::NonceTooLow { message } + | EoaExecutionError::NonceTooHigh { message } + | EoaExecutionError::AlreadyKnown { message } + | EoaExecutionError::ReplacementUnderpriced { message } + | EoaExecutionError::InsufficientFunds { message } + | EoaExecutionError::GasError { message } + | EoaExecutionError::PoolLimitExceeded { message } + | EoaExecutionError::AccountError { message } + | EoaExecutionError::RpcError { message, .. } => message, + } + } + + /// Convert to appropriate job result for send operations + pub fn to_send_job_result( + &self, + strategy: &RecoveryStrategy, + success_factory: impl FnOnce() -> T, + error_factory: impl FnOnce(String) -> E, + ) -> twmq::job::JobResult { + use twmq::job::{ToJobError, ToJobResult}; + + if strategy.queue_confirmation { + // Treat as success since we need to check confirmation + Ok(success_factory()) + } else if strategy.retryable { + if let Some(delay) = strategy.retry_delay { + Err(error_factory(self.message().to_string()).nack(Some(delay), RequeuePosition::Last)) + } else { + Err(error_factory(self.message().to_string()).nack(Some(Duration::from_secs(5)), RequeuePosition::Last)) + } + } else { + // Permanent failure + Err(error_factory(self.message().to_string()).fail()) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_nonce_too_low_mapping() { + let error = EoaErrorMapper::map_ethereum_error(-32000, "nonce too low"); + let strategy = EoaErrorMapper::get_recovery_strategy(&error); + + match error { + EoaExecutionError::NonceTooLow { .. 
} => {} + _ => panic!("Expected NonceTooLow error"), + } + + assert!(strategy.queue_confirmation); + assert!(!strategy.recycle_nonce); + } + + #[test] + fn test_insufficient_funds_mapping() { + let error = EoaErrorMapper::map_ethereum_error(-32000, "insufficient funds for gas * price + value"); + let strategy = EoaErrorMapper::get_recovery_strategy(&error); + + match error { + EoaExecutionError::InsufficientFunds { .. } => {} + _ => panic!("Expected InsufficientFunds error"), + } + + assert!(!strategy.queue_confirmation); + assert!(strategy.recycle_nonce); + assert!(strategy.retryable); + } + + #[test] + fn test_already_known_mapping() { + let error = EoaErrorMapper::map_ethereum_error(-32000, "already known"); + let strategy = EoaErrorMapper::get_recovery_strategy(&error); + + match error { + EoaExecutionError::AlreadyKnown { .. } => {} + _ => panic!("Expected AlreadyKnown error"), + } + + assert!(strategy.queue_confirmation); + assert!(!strategy.recycle_nonce); + } +} \ No newline at end of file diff --git a/executors/src/eoa/mod.rs b/executors/src/eoa/mod.rs new file mode 100644 index 0000000..262cb19 --- /dev/null +++ b/executors/src/eoa/mod.rs @@ -0,0 +1,19 @@ +pub mod confirm; +pub mod eoa_confirmation_worker; +pub mod error_classifier; +pub mod nonce_manager; +pub mod send; +pub mod transaction_store; + +pub use confirm::{ + EoaConfirmationError, EoaConfirmationHandler, EoaConfirmationJobData, EoaConfirmationResult, +}; +pub use eoa_confirmation_worker::{ + EoaConfirmationWorker, EoaConfirmationWorkerError, EoaConfirmationWorkerJobData, EoaConfirmationWorkerResult, +}; +pub use error_classifier::{EoaErrorMapper, EoaExecutionError, RecoveryStrategy}; +pub use nonce_manager::{EoaHealth, NonceAssignment, NonceManager}; +pub use send::{EoaSendError, EoaSendHandler, EoaSendJobData, EoaSendResult}; +pub use transaction_store::{ + ActiveAttempt, ConfirmationData, TransactionData, TransactionStore, TransactionStoreError, +}; diff --git a/executors/src/eoa/nonce_manager.rs b/executors/src/eoa/nonce_manager.rs new file mode 100644 index 0000000..5592f97 --- /dev/null +++ b/executors/src/eoa/nonce_manager.rs @@ -0,0 +1,550 @@ +use alloy::primitives::{Address, B256, U256}; +use engine_core::error::EngineError; +use serde::{Deserialize, Serialize}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use thiserror::Error; +use twmq::redis::{AsyncCommands, Pipeline, aio::ConnectionManager}; + +#[derive(Debug, Error)] +pub enum NonceManagerError { + #[error("Redis error: {0}")] + RedisError(#[from] twmq::redis::RedisError), + + #[error("Max in-flight transactions reached for EOA {eoa}: {current}/{max}")] + MaxInFlightReached { + eoa: Address, + current: u32, + max: u32, + }, + + #[error("EOA {eoa} needs sync - no optimistic nonce found")] + NeedsSync { eoa: Address }, + + #[error("Nonce assignment failed: {reason}")] + NonceAssignmentFailed { reason: String }, +} + +impl From for EngineError { + fn from(err: NonceManagerError) -> Self { + EngineError::InternalError { + message: err.to_string(), + } + } +} + +/// Tracks nonce assignment for a specific transaction +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct NonceAssignment { + pub transaction_id: String, + pub transaction_hash: B256, + pub assigned_at: u64, +} + +/// Health tracking for an EOA on a specific chain +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct EoaHealth { + pub in_flight_count: u32, + pub consecutive_errors: u32, + pub last_error_time: u64, + pub last_success_time: u64, + pub recycled_nonce_count: u32, 
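+    // Sync bookkeeping: updated by try_sync_nonce and by the forced-reset
+    // path in assign_nonce when the recycled-nonce threshold is exceeded.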
+ pub last_sync_time: u64, + pub is_synced: bool, +} + +impl Default for EoaHealth { + fn default() -> Self { + Self { + in_flight_count: 0, + consecutive_errors: 0, + last_error_time: 0, + last_success_time: 0, + recycled_nonce_count: 0, + last_sync_time: 0, + is_synced: false, + } + } +} + +/// Manages nonce assignment and recycling for EOA transactions +pub struct NonceManager { + redis: ConnectionManager, + namespace: Option, + max_in_flight: u32, + max_recycled: u32, +} + +impl NonceManager { + pub fn new( + redis: ConnectionManager, + namespace: Option, + max_in_flight: u32, + max_recycled: u32, + ) -> Self { + Self { + redis, + namespace, + max_in_flight, + max_recycled, + } + } + + // Redis key naming methods with proper EOA namespacing + fn eoa_key(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{}:eoa:{}:{}", ns, chain_id, eoa), + None => format!("eoa:{}:{}", chain_id, eoa), + } + } + + fn optimistic_nonce_key(&self, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{}:eoa_nonce:optimistic:{}", ns, chain_id), + None => format!("eoa_nonce:optimistic:{}", chain_id), + } + } + + fn recycled_nonces_key(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{}:eoa_nonce:recycled:{}:{}", ns, chain_id, eoa), + None => format!("eoa_nonce:recycled:{}:{}", chain_id, eoa), + } + } + + fn nonce_assignments_key(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{}:eoa_nonce:assigned:{}:{}", ns, chain_id, eoa), + None => format!("eoa_nonce:assigned:{}:{}", chain_id, eoa), + } + } + + fn onchain_nonce_cache_key(&self, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{}:eoa_nonce:onchain:{}", ns, chain_id), + None => format!("eoa_nonce:onchain:{}", chain_id), + } + } + + fn health_status_key(&self, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{}:eoa_health:{}", ns, chain_id), + None => format!("eoa_health:{}", chain_id), + } + } + + fn epoch_key(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{}:eoa_epoch:{}:{}", ns, chain_id, eoa), + None => format!("eoa_epoch:{}:{}", chain_id, eoa), + } + } + + fn sync_lock_key(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{}:eoa_sync_lock:{}:{}", ns, chain_id, eoa), + None => format!("eoa_sync_lock:{}:{}", chain_id, eoa), + } + } + + /// Get current health status for an EOA + pub async fn get_eoa_health( + &self, + eoa: Address, + chain_id: u64, + ) -> Result { + let mut conn = self.redis.clone(); + let health_key = self.health_status_key(chain_id); + let eoa_field = eoa.to_string(); + + let health_json: Option = conn.hget(&health_key, &eoa_field).await?; + + match health_json { + Some(json) => Ok(serde_json::from_str(&json).unwrap_or_default()), + None => Ok(EoaHealth::default()), + } + } + + /// Update health status for an EOA + pub async fn update_eoa_health( + &self, + eoa: Address, + chain_id: u64, + health: &EoaHealth, + ) -> Result<(), NonceManagerError> { + let mut conn = self.redis.clone(); + let health_key = self.health_status_key(chain_id); + let eoa_field = eoa.to_string(); + let health_json = serde_json::to_string(health).unwrap(); + + let _: () = conn.hset(&health_key, &eoa_field, health_json).await?; + Ok(()) + } + + /// Atomic nonce assignment using cached onchain nonce with epoch-based recycling protection + pub async fn 
assign_nonce( + &self, + eoa: Address, + chain_id: u64, + ) -> Result<(u64, u64), NonceManagerError> { + let script = twmq::redis::Script::new( + r#" + local eoa = ARGV[1] + local max_recycled = tonumber(ARGV[2]) + local max_in_flight = tonumber(ARGV[3]) + local now = tonumber(ARGV[4]) + + local optimistic_nonce_key = KEYS[1] + local recycled_nonces_key = KEYS[2] + local health_key = KEYS[3] + local epoch_key = KEYS[4] + local onchain_cache_key = KEYS[5] + + -- Get current epoch (or initialize) + local current_epoch = redis.call('GET', epoch_key) + if not current_epoch then + current_epoch = tostring(now) + redis.call('SET', epoch_key, current_epoch) + end + + -- Derive recycled count + local recycled_count = redis.call('ZCARD', recycled_nonces_key) + + -- Get optimistic nonce + local optimistic_nonce = redis.call('HGET', optimistic_nonce_key, eoa) + if not optimistic_nonce then + -- Not initialized, need sync + return {-3, "needs_sync", "0", current_epoch} + end + optimistic_nonce = tonumber(optimistic_nonce) + + -- Get cached onchain nonce + local onchain_nonce = redis.call('HGET', onchain_cache_key, eoa) + if not onchain_nonce then + -- No cached onchain nonce, need sync + return {-3, "needs_sync", "0", current_epoch} + end + onchain_nonce = tonumber(onchain_nonce) + + -- Derive in-flight count + local in_flight_count = math.max(0, optimistic_nonce - onchain_nonce) + + -- Check if recycled count exceeds threshold + if recycled_count > max_recycled then + -- Force reset: increment epoch, clear recycled nonces, trigger resync + local reset_epoch = tostring(now) + + -- Update epoch (this invalidates any stale recycling attempts) + redis.call('SET', epoch_key, reset_epoch) + + -- Clear all recycled nonces + redis.call('DEL', recycled_nonces_key) + + -- Clear optimistic nonce to force resync + redis.call('HDEL', optimistic_nonce_key, eoa) + + -- Clear cached onchain nonce to force fresh fetch + redis.call('HDEL', onchain_cache_key, eoa) + + -- Update health to indicate reset occurred + local health_json = redis.call('HGET', health_key, eoa) + local health = {} + if health_json then + health = cjson.decode(health_json) + end + health.last_sync_time = now + health.is_synced = false + redis.call('HSET', health_key, eoa, cjson.encode(health)) + + return {-1, "too_many_recycled_reset", "0", reset_epoch} + end + + -- Check in-flight threshold + if in_flight_count >= max_in_flight then + return {-2, "max_in_flight", tostring(in_flight_count), current_epoch} + end + + -- Try to pop the lowest recycled nonce first + if recycled_count > 0 then + local recycled_nonce = redis.call('ZPOPMIN', recycled_nonces_key) + if #recycled_nonce > 0 then + -- Update health with successful assignment + local health_json = redis.call('HGET', health_key, eoa) + local health = {} + if health_json then + health = cjson.decode(health_json) + end + health.last_success_time = now + redis.call('HSET', health_key, eoa, cjson.encode(health)) + + return {0, recycled_nonce[1], current_epoch} + end + end + + -- No recycled nonce, increment optimistic nonce + local nonce = optimistic_nonce + redis.call('HSET', optimistic_nonce_key, eoa, nonce + 1) + + -- Update health with successful assignment + local health_json = redis.call('HGET', health_key, eoa) + local health = {} + if health_json then + health = cjson.decode(health_json) + end + health.last_success_time = now + redis.call('HSET', health_key, eoa, cjson.encode(health)) + + return {1, tostring(nonce), current_epoch} + "#, + ); + + let now = SystemTime::now() + 
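+            // `now` (unix seconds) is passed to the script as ARGV[4]; it seeds or
+            // advances the epoch and stamps last_success_time / last_sync_time in
+            // the health record.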
.duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let result: (i32, String, String, String) = script + .key(self.optimistic_nonce_key(chain_id)) + .key(self.recycled_nonces_key(eoa, chain_id)) + .key(self.health_status_key(chain_id)) + .key(self.epoch_key(eoa, chain_id)) + .key(self.onchain_nonce_cache_key(chain_id)) + .arg(eoa.to_string()) + .arg(self.max_recycled) + .arg(self.max_in_flight) + .arg(now) + .invoke_async(&mut self.redis.clone()) + .await?; + + match result.0 { + -1 => { + // Reset occurred due to too many recycled nonces - force resync needed + Err(NonceManagerError::NeedsSync { eoa }) + } + -2 => Err(NonceManagerError::MaxInFlightReached { + eoa, + current: result.2.parse().unwrap_or(0), + max: self.max_in_flight, + }), + -3 => Err(NonceManagerError::NeedsSync { eoa }), + 0 | 1 => { + let nonce: u64 = + result + .1 + .parse() + .map_err(|e| NonceManagerError::NonceAssignmentFailed { + reason: format!("Failed to parse nonce: {}", e), + })?; + let epoch: u64 = + result + .3 + .parse() + .map_err(|e| NonceManagerError::NonceAssignmentFailed { + reason: format!("Failed to parse epoch: {}", e), + })?; + Ok((nonce, epoch)) + } + _ => Err(NonceManagerError::NonceAssignmentFailed { + reason: "Unexpected result from nonce assignment".to_string(), + }), + } + } + + /// Record a nonce assignment for tracking + pub fn add_nonce_assignment_command( + &self, + pipeline: &mut Pipeline, + eoa: Address, + chain_id: u64, + nonce: U256, + transaction_id: &str, + transaction_hash: B256, + ) { + let assignment = NonceAssignment { + transaction_id: transaction_id.to_string(), + transaction_hash, + assigned_at: SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(), + }; + + let assignment_json = serde_json::to_string(&assignment).unwrap(); + let assignments_key = self.nonce_assignments_key(eoa, chain_id); + + pipeline.hset(&assignments_key, nonce.to_string(), assignment_json); + } + + /// Recycle a nonce back to the recycled set + pub fn add_recycle_nonce_command( + &self, + pipeline: &mut Pipeline, + eoa: Address, + chain_id: u64, + nonce: U256, + ) { + let recycled_key = self.recycled_nonces_key(eoa, chain_id); + let health_key = self.health_status_key(chain_id); + + // Add to recycled sorted set (score = nonce value for ordering) + pipeline.zadd(&recycled_key, nonce.to_string(), nonce.to::()); + + // Update health to increment recycled count + pipeline.hincr(&health_key, format!("{}:recycled_nonce_count", eoa), 1); + } + + /// Remove a nonce assignment after successful confirmation + pub fn add_remove_assignment_command( + &self, + pipeline: &mut Pipeline, + eoa: Address, + chain_id: u64, + nonce: U256, + ) { + let assignments_key = self.nonce_assignments_key(eoa, chain_id); + let health_key = self.health_status_key(chain_id); + + pipeline.hdel(&assignments_key, nonce.to_string()); + + // Decrement in-flight count + pipeline.hincr(&health_key, format!("{}:in_flight_count", eoa), -1); + } + + /// Get all nonce assignments for an EOA + pub async fn get_nonce_assignments( + &self, + eoa: Address, + chain_id: u64, + nonce: U256, + ) -> Result, NonceManagerError> { + let mut conn = self.redis.clone(); + let assignments_key = self.nonce_assignments_key(eoa, chain_id); + + let assignment_json: Option = + conn.hget(&assignments_key, nonce.to_string()).await?; + + match assignment_json { + Some(json) => { + let assignment: NonceAssignment = serde_json::from_str(&json).map_err(|e| { + NonceManagerError::NonceAssignmentFailed { + reason: format!("Failed to deserialize 
assignment: {}", e), + } + })?; + Ok(vec![assignment]) + } + None => Ok(vec![]), + } + } + + /// Attempt to acquire sync lock and sync nonce for an EOA + pub async fn try_sync_nonce( + &self, + eoa: Address, + chain_id: u64, + onchain_nonce: u64, + ) -> Result { + let script = twmq::redis::Script::new( + r#" + local eoa = ARGV[1] + local onchain_nonce = tonumber(ARGV[2]) + local now = tonumber(ARGV[3]) + + local optimistic_nonce_key = KEYS[1] + local recycled_nonces_key = KEYS[2] + local health_key = KEYS[3] + local onchain_cache_key = KEYS[4] + local sync_lock_key = KEYS[5] + local epoch_key = KEYS[6] + + -- Try to acquire sync lock (60 second expiry) + local lock_acquired = redis.call('SET', sync_lock_key, now, 'NX', 'EX', '60') + if not lock_acquired then + -- Another process is syncing + return {0, "sync_in_progress"} + end + + -- Successfully acquired lock, perform sync + -- Clear recycled nonces and reset optimistic nonce + redis.call('DEL', recycled_nonces_key) + redis.call('HSET', optimistic_nonce_key, eoa, onchain_nonce) + redis.call('HSET', onchain_cache_key, eoa, onchain_nonce) + + -- Update epoch to invalidate any stale recycling attempts + local new_epoch = tostring(now) + redis.call('SET', epoch_key, new_epoch) + + -- Update health status + local health_json = redis.call('HGET', health_key, eoa) + local health = {} + if health_json then + health = cjson.decode(health_json) + end + health.is_synced = true + health.last_sync_time = now + health.consecutive_errors = 0 + redis.call('HSET', health_key, eoa, cjson.encode(health)) + + return {1, "synced", new_epoch} + "#, + ); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let result: (i32, String, Option) = script + .key(self.optimistic_nonce_key(chain_id)) + .key(self.recycled_nonces_key(eoa, chain_id)) + .key(self.health_status_key(chain_id)) + .key(self.onchain_nonce_cache_key(chain_id)) + .key(self.sync_lock_key(eoa, chain_id)) + .key(self.epoch_key(eoa, chain_id)) + .arg(eoa.to_string()) + .arg(onchain_nonce) + .arg(now) + .invoke_async(&mut self.redis.clone()) + .await?; + + Ok(result.0 == 1) + } + + /// Get cached onchain nonce + pub async fn get_cached_onchain_nonce( + &self, + eoa: Address, + chain_id: u64, + ) -> Result, NonceManagerError> { + let mut conn = self.redis.clone(); + let cache_key = self.onchain_nonce_cache_key(chain_id); + + let nonce_str: Option = conn.hget(&cache_key, eoa.to_string()).await?; + + match nonce_str { + Some(s) => { + let nonce = + s.parse::() + .map_err(|e| NonceManagerError::NonceAssignmentFailed { + reason: format!("Failed to parse cached nonce: {}", e), + })?; + Ok(Some(nonce)) + } + None => Ok(None), + } + } + + /// Update cached onchain nonce + pub async fn update_cached_onchain_nonce( + &self, + eoa: Address, + chain_id: u64, + nonce: u64, + ) -> Result<(), NonceManagerError> { + let mut conn = self.redis.clone(); + let cache_key = self.onchain_nonce_cache_key(chain_id); + + let _: () = conn + .hset(&cache_key, eoa.to_string(), nonce.to_string()) + .await?; + Ok(()) + } +} diff --git a/executors/src/eoa/send.rs b/executors/src/eoa/send.rs new file mode 100644 index 0000000..10ca37c --- /dev/null +++ b/executors/src/eoa/send.rs @@ -0,0 +1,783 @@ +use alloy::consensus::Transaction; +use alloy::network::TransactionBuilder; +use alloy::primitives::{Address, B256, Bytes, U256}; +use alloy::providers::Provider; +use alloy::rpc::types::TransactionRequest as AlloyTransactionRequest; +use alloy::transports::{RpcError, TransportErrorKind}; +use 
engine_core::credentials::SigningCredential; +use engine_core::{ + chain::{Chain, ChainService, RpcCredentials}, + error::{AlloyRpcErrorToEngineError, EngineError}, + execution_options::WebhookOptions, + signer::{AccountSigner, EoaSigner, EoaSigningOptions}, +}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::{sync::Arc, time::Duration}; +use twmq::{ + FailHookData, NackHookData, Queue, SuccessHookData, UserCancellable, + error::TwmqError, + hooks::TransactionContext, + job::{BorrowedJob, DelayOptions, JobResult, RequeuePosition, ToJobError, ToJobResult}, +}; + +use crate::{ + transaction_registry::TransactionRegistry, + webhook::{ + WebhookJobHandler, + envelope::{ExecutorStage, HasTransactionMetadata, HasWebhookOptions, WebhookCapable}, + }, +}; + +use super::{ + confirm::{EoaConfirmationHandler, EoaConfirmationJobData}, + error_classifier::{EoaErrorMapper, EoaExecutionError, RecoveryStrategy}, + nonce_manager::NonceManager, +}; + +// --- Job Payload --- +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaSendJobData { + pub transaction_id: String, + pub chain_id: u64, + pub from: Address, + pub to: Option
, + pub value: U256, + pub data: Bytes, + pub webhook_options: Option>, + + pub assigned_nonce: Option, + + pub gas_limit: Option, + + pub signing_credential: SigningCredential, + pub rpc_credentials: RpcCredentials, +} + +impl HasWebhookOptions for EoaSendJobData { + fn webhook_options(&self) -> Option> { + self.webhook_options.clone() + } +} + +impl HasTransactionMetadata for EoaSendJobData { + fn transaction_id(&self) -> String { + self.transaction_id.clone() + } +} + +// --- Success Result --- +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaSendResult { + pub transaction_hash: B256, + pub nonce_used: u64, + pub gas_limit: U256, + pub max_fee_per_gas: U256, + pub max_priority_fee_per_gas: U256, + pub possibly_duplicate: Option, +} + +// --- Error Types --- +#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] +pub enum EoaSendError { + #[error("Chain service error for chainId {chain_id}: {message}")] + ChainServiceError { chain_id: u64, message: String }, + + #[error("Transaction simulation failed: {message}")] + SimulationFailed { + message: String, + revert_reason: Option, + revert_data: Option, + }, + + #[error("Gas estimation failed: {message}")] + GasEstimationFailed { message: String }, + + #[error("Fee estimation failed: {message}")] + FeeEstimationFailed { + message: String, + inner_error: EngineError, + }, + + #[error("Nonce assignment failed: {reason}")] + NonceAssignmentFailed { reason: String }, + + #[error("Max in-flight transactions reached for EOA {eoa}: {current}/{max}")] + MaxInFlightReached { + eoa: Address, + current: u32, + max: u32, + }, + + #[error("Transaction send failed: {message}")] + SendFailed { + nonce_used: U256, + message: String, + possibly_sent: bool, // true for "nonce too low" errors + should_retry: bool, + }, + + #[error("EOA health check failed: {reason}")] + UnhealthyEoa { + eoa: Address, + reason: String, + should_resync: bool, + }, + + #[error("Invalid RPC Credentials: {message}")] + InvalidRpcCredentials { message: String }, + + #[error("Internal error: {message}")] + InternalError { message: String }, + + #[error("Transaction cancelled by user")] + UserCancelled, +} + +impl From for EoaSendError { + fn from(error: TwmqError) -> Self { + EoaSendError::InternalError { + message: format!("Queue error: {}", error), + } + } +} + +impl UserCancellable for EoaSendError { + fn user_cancelled() -> Self { + EoaSendError::UserCancelled + } +} + +impl EoaSendError { + /// Returns true if the nonce might have been consumed (used) by this error + pub fn possibly_sent(&self) -> bool { + match self { + EoaSendError::SendFailed { possibly_sent, .. 
} => *possibly_sent, + _ => false, + } + } +} + +// --- Handler --- +pub struct EoaSendHandler +where + CS: ChainService + Send + Sync + 'static, +{ + pub chain_service: Arc, + pub nonce_manager: Arc, + pub webhook_queue: Arc>, + pub confirm_queue: Arc>>, + pub transaction_registry: Arc, + pub max_in_flight: u32, +} + +impl ExecutorStage for EoaSendHandler +where + CS: ChainService + Send + Sync + 'static, +{ + fn executor_name() -> &'static str { + "eoa" + } + + fn stage_name() -> &'static str { + "send" + } +} + +impl WebhookCapable for EoaSendHandler +where + CS: ChainService + Send + Sync + 'static, +{ + fn webhook_queue(&self) -> &Arc> { + &self.webhook_queue + } +} + +impl twmq::DurableExecution for EoaSendHandler +where + CS: ChainService + Send + Sync + 'static, +{ + type Output = EoaSendResult; + type ErrorData = EoaSendError; + type JobData = EoaSendJobData; + + #[tracing::instrument(skip(self, job), fields(transaction_id = job.job.id, stage = Self::stage_name(), executor = Self::executor_name()))] + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + let job_data = &job.job.data; + + // 1. Get Chain + let chain = self + .chain_service + .get_chain(job_data.chain_id) + .map_err(|e| EoaSendError::ChainServiceError { + chain_id: job_data.chain_id, + message: format!("Failed to get chain instance: {}", e), + }) + .map_err_fail()?; + + let chain_auth_headers = job_data + .rpc_credentials + .to_header_map() + .map_err(|e| EoaSendError::InvalidRpcCredentials { + message: e.to_string(), + }) + .map_err_fail()?; + + let chain = chain.with_new_default_headers(chain_auth_headers); + + // 2. Build base transaction request + let mut tx_request = AlloyTransactionRequest::default() + .with_from(job_data.from) + .with_value(job_data.value) + .with_input(job_data.data.clone()) + .with_chain_id(job_data.chain_id); + + if let Some(to) = job_data.to { + tx_request = tx_request.with_to(to); + } + + tx_request = self + .estimate_gas_fees(&chain, tx_request) + .await + .map_err_fail()?; + + // 4. Estimate gas limit if not provided (this also simulates) + if let Some(gas_limit) = job_data.gas_limit { + tx_request = tx_request.with_gas_limit(gas_limit); + } else { + match chain.provider().estimate_gas(tx_request).await { + Ok(gas) => { + let gas_with_buffer = gas * 110 / 100; // Add 10% buffer + tx_request = tx_request.with_gas_limit(gas_with_buffer); + } + Err(rpc_error) => { + return self.handle_simulation_error(rpc_error, &chain).await; + } + } + } + + // 5. Assign nonce (atomic operation with sync fallback) + let assigned_nonce = if let Some(nonce) = job_data.assigned_nonce { + // Retry with previously assigned nonce + nonce + } else { + // First attempt - assign new nonce + match self + .nonce_manager + .assign_nonce(job_data.from, job_data.chain_id) + .await + { + Ok((nonce, _epoch)) => nonce, + Err(super::nonce_manager::NonceManagerError::NeedsSync { .. }) => { + // Need to sync - try to acquire sync lock and sync + return self.handle_sync_required(job_data, &chain).await; + } + Err(e) => { + return self.handle_nonce_assignment_error(e).await; + } + } + }; + + // 6. 
Apply nonce to final transaction + let final_tx = tx_request + .with_nonce(assigned_nonce) + .build_typed_tx() + .map_err(|e| EoaSendError::InternalError { + message: format!("Failed to build typed transaction: {}", json!(e)), + }) + .map_err_fail()?; + + let gas_limit = final_tx.gas_limit(); + let max_fee_per_gas = final_tx.max_fee_per_gas(); + let max_priority_fee_per_gas = final_tx.max_priority_fee_per_gas(); + + tracing::debug!( + nonce = %assigned_nonce, + gas_limit = %gas_limit, + max_fee_per_gas = %max_fee_per_gas, + max_priority_fee_per_gas = ?max_priority_fee_per_gas, + "Sending transaction" + ); + + // 7. Send transaction + match chain.provider().send_transaction(final_tx).await { + Ok(pending_tx) => { + let tx_hash = *pending_tx.tx_hash(); + + tracing::info!( + transaction_hash = %tx_hash, + nonce = %assigned_nonce, + "Transaction sent successfully" + ); + + Ok(EoaSendResult { + transaction_hash: tx_hash, + nonce_used: assigned_nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + possibly_duplicate: None, + }) + } + Err(send_error) => { + self.handle_send_error(send_error, assigned_nonce, &chain) + .await + } + } + } + + async fn on_success( + &self, + job: &BorrowedJob, + success_data: SuccessHookData<'_, Self::Output>, + tx: &mut TransactionContext<'_>, + ) { + // 1. Record nonce assignment for tracking + self.nonce_manager.add_nonce_assignment_command( + tx.pipeline(), + job.job.data.from, + job.job.data.chain_id, + success_data.result.nonce_used, + &job.job.data.transaction_id, + success_data.result.transaction_hash, + ); + + // 2. Update transaction registry: move from send to confirm queue + self.transaction_registry.add_set_command( + tx.pipeline(), + &job.job.data.transaction_id, + "eoa_confirm", + ); + + // 3. Queue confirmation job + let confirm_job = self + .confirm_queue + .clone() + .job(EoaConfirmationJobData { + transaction_id: job.job.data.transaction_id.clone(), + chain_id: job.job.data.chain_id, + eoa_address: job.job.data.from, + nonce: success_data.result.nonce_used, + transaction_hash: success_data.result.transaction_hash, + webhook_options: job.job.data.webhook_options.clone(), + rpc_credentials: job.job.data.rpc_credentials.clone(), + }) + .with_id(&job.job.data.transaction_id) + .with_delay(DelayOptions { + delay: Duration::from_secs(2), + position: RequeuePosition::Last, + }); + + if let Err(e) = tx.queue_job(confirm_job) { + tracing::error!( + transaction_id = %job.job.data.transaction_id, + error = %e, + "Failed to queue confirmation job" + ); + } + + if let Err(e) = self.queue_success_webhook(job, success_data, tx) { + tracing::error!( + transaction_id = %job.job.data.transaction_id, + error = %e, + "Failed to queue success webhook" + ); + } + } + + async fn on_nack( + &self, + job: &BorrowedJob, + nack_data: NackHookData<'_, Self::ErrorData>, + tx: &mut TransactionContext<'_>, + ) { + // Update health on nack (increment error count) + if matches!( + nack_data.error, + EoaSendError::SimulationFailed { .. } + | EoaSendError::SendFailed { .. } + | EoaSendError::UnhealthyEoa { .. 
} + ) { + // TODO: Update health error counters + } + + if let Err(e) = self.queue_nack_webhook(job, nack_data, tx) { + tracing::error!( + transaction_id = %job.job.data.transaction_id, + error = %e, + "Failed to queue nack webhook" + ); + } + } + + async fn on_fail( + &self, + job: &BorrowedJob, + fail_data: FailHookData<'_, Self::ErrorData>, + tx: &mut TransactionContext<'_>, + ) { + // Handle nonce recycling based on error type + if let Some(nonce) = job.job.data.assigned_nonce { + let should_recycle = match fail_data.error { + EoaSendError::SendFailed { possibly_sent, .. } => !possibly_sent, + EoaSendError::SimulationFailed { .. } => true, + EoaSendError::GasEstimationFailed { .. } => true, + EoaSendError::NonceAssignmentFailed { .. } => false, // Nonce wasn't assigned + EoaSendError::MaxInFlightReached { .. } => false, // Nonce wasn't assigned + EoaSendError::UnhealthyEoa { should_resync, .. } => *should_resync, + _ => false, + }; + + if should_recycle { + tracing::debug!( + nonce = %nonce, + transaction_id = %job.job.data.transaction_id, + "Recycling nonce after permanent failure" + ); + + self.nonce_manager.add_recycle_nonce_command( + tx.pipeline(), + job.job.data.from, + job.job.data.chain_id, + nonce, + ); + } + } + + // Handle sync triggering for health issues + if let EoaSendError::UnhealthyEoa { + should_resync: true, + .. + } = fail_data.error + { + // TODO: Trigger sync operation + tracing::warn!( + eoa = %job.job.data.from, + chain_id = job.job.data.chain_id, + "EOA health issue detected - sync recommended" + ); + } + + // Remove transaction from registry since it failed permanently + self.transaction_registry + .add_remove_command(tx.pipeline(), &job.job.data.transaction_id); + + if let Err(e) = self.queue_fail_webhook(job, fail_data, tx) { + tracing::error!( + transaction_id = %job.job.data.transaction_id, + error = %e, + "Failed to queue fail webhook" + ); + } + } +} + +// --- Error Handling --- + +impl EoaSendHandler +where + CS: ChainService + Send + Sync + 'static, +{ + async fn handle_simulation_error( + &self, + rpc_error: RpcError, + chain: &impl Chain, + ) -> JobResult { + // Check if this is a revert first + if let RpcError::ErrorResp(error_payload) = &rpc_error { + if error_payload.as_revert_data().is_some() { + return Err(EoaSendError::SimulationFailed { + message: format!( + "Transaction reverted during simulation: {}", + error_payload.message + ), + revert_reason: Some(error_payload.message.clone()), + revert_data: None, + } + .fail()); + } + } + + // Try to map actionable errors + match EoaErrorMapper::map_send_error(&rpc_error, chain) { + Ok(eoa_error) => { + let strategy = EoaErrorMapper::get_recovery_strategy(&eoa_error); + eoa_error.to_send_job_result( + &strategy, + || { + // This shouldn't happen for simulation errors, but handle gracefully + EoaSendResult { + transaction_hash: B256::ZERO, + nonce_used: U256::ZERO, + gas_limit: U256::ZERO, + max_fee_per_gas: U256::ZERO, + max_priority_fee_per_gas: U256::ZERO, + possibly_duplicate: None, + } + }, + |reason| EoaSendError::SimulationFailed { + message: reason.clone(), + revert_reason: None, + revert_data: None, + }, + ) + } + Err(engine_error) => { + // Use existing engine error handling + Err(EoaSendError::SimulationFailed { + message: engine_error.to_string(), + revert_reason: None, + revert_data: None, + } + .fail()) + } + } + } + + async fn handle_sync_required( + &self, + job_data: &EoaSendJobData, + chain: &impl Chain, + ) -> JobResult { + tracing::info!( + eoa = %job_data.from, + chain_id = 
job_data.chain_id, + "Sync required - attempting to sync nonce" + ); + + // Get current onchain nonce + let onchain_nonce = match chain.provider().get_transaction_count(job_data.from).await { + Ok(nonce) => nonce.to::(), + Err(e) => { + // Try to map actionable errors, otherwise use engine error handling + match EoaErrorMapper::map_send_error(&e, chain) { + Ok(eoa_error) => { + let strategy = EoaErrorMapper::get_recovery_strategy(&eoa_error); + return eoa_error.to_send_job_result( + &strategy, + || EoaSendResult { + transaction_hash: B256::ZERO, + nonce_used: U256::ZERO, + gas_limit: U256::ZERO, + max_fee_per_gas: U256::ZERO, + max_priority_fee_per_gas: U256::ZERO, + possibly_duplicate: None, + }, + |reason| EoaSendError::ChainServiceError { + chain_id: job_data.chain_id, + message: format!( + "Failed to get onchain nonce for sync: {}", + reason + ), + }, + ); + } + Err(engine_error) => { + // Use existing engine error handling + return Err(EoaSendError::ChainServiceError { + chain_id: job_data.chain_id, + message: format!( + "Failed to get onchain nonce for sync: {}", + engine_error + ), + } + .fail()); + } + } + } + }; + + // Try to acquire sync lock and perform sync + match self + .nonce_manager + .try_sync_nonce(job_data.from, job_data.chain_id, onchain_nonce) + .await + { + Ok(true) => { + // Successfully synced + tracing::info!( + eoa = %job_data.from, + chain_id = job_data.chain_id, + onchain_nonce = %onchain_nonce, + "Successfully synced nonce" + ); + + // Delay and retry the job + Err(EoaSendError::NonceAssignmentFailed { + reason: "Nonce synced - retrying".to_string(), + } + .nack(Some(Duration::from_millis(100)), RequeuePosition::Last)) + } + Ok(false) => { + // Another process is syncing + tracing::debug!( + eoa = %job_data.from, + chain_id = job_data.chain_id, + "Another process is syncing - backing off" + ); + + Err(EoaSendError::NonceAssignmentFailed { + reason: "Sync in progress - backing off".to_string(), + } + .nack(Some(Duration::from_secs(2)), RequeuePosition::Last)) + } + Err(e) => { + tracing::error!( + eoa = %job_data.from, + chain_id = job_data.chain_id, + error = %e, + "Failed to sync nonce" + ); + + Err(EoaSendError::NonceAssignmentFailed { + reason: format!("Sync failed: {}", e), + } + .nack(Some(Duration::from_secs(10)), RequeuePosition::Last)) + } + } + } + + async fn handle_nonce_assignment_error( + &self, + nonce_error: super::nonce_manager::NonceManagerError, + ) -> JobResult { + use super::nonce_manager::NonceManagerError; + + match nonce_error { + NonceManagerError::MaxInFlightReached { eoa, current, max } => { + Err(EoaSendError::MaxInFlightReached { eoa, current, max } + .nack(Some(Duration::from_secs(10)), RequeuePosition::Last)) + } + NonceManagerError::NeedsSync { eoa } => { + // This shouldn't happen since we handle it above, but just in case + Err(EoaSendError::NonceAssignmentFailed { + reason: format!("Unexpected needs sync for EOA {}", eoa), + } + .nack(Some(Duration::from_secs(30)), RequeuePosition::Last)) + } + _ => Err(EoaSendError::NonceAssignmentFailed { + reason: nonce_error.to_string(), + } + .fail()), + } + } + + async fn handle_send_error( + &self, + send_error: RpcError, + nonce: U256, + chain: &impl Chain, + ) -> JobResult { + // Try to map actionable errors, otherwise use engine error handling + match EoaErrorMapper::map_send_error(&send_error, chain) { + Ok(eoa_error) => { + let strategy = EoaErrorMapper::get_recovery_strategy(&eoa_error); + + tracing::debug!( + nonce = %nonce, + error = ?eoa_error, + strategy = ?strategy, + "Mapped 
send error" + ); + + if strategy.queue_confirmation { + tracing::warn!(nonce = %nonce, message = %eoa_error.message(), "Transaction possibly sent - treating as success"); + + Ok(EoaSendResult { + transaction_hash: B256::ZERO, // Will be resolved in confirmation + nonce_used: nonce, + gas_limit: U256::ZERO, + max_fee_per_gas: U256::ZERO, + max_priority_fee_per_gas: U256::ZERO, + possibly_duplicate: Some(true), + }) + } else { + eoa_error.to_send_job_result( + &strategy, + || EoaSendResult { + transaction_hash: B256::ZERO, + nonce_used: nonce, + gas_limit: U256::ZERO, + max_fee_per_gas: U256::ZERO, + max_priority_fee_per_gas: U256::ZERO, + possibly_duplicate: None, + }, + |reason| EoaSendError::SendFailed { + nonce_used: nonce, + message: reason, + possibly_sent: strategy.queue_confirmation, + should_retry: strategy.retryable, + }, + ) + } + } + Err(engine_error) => { + // Use existing engine error handling - not actionable + tracing::debug!( + nonce = %nonce, + engine_error = ?engine_error, + "Using engine error handling for non-actionable error" + ); + + Err(EoaSendError::SendFailed { + nonce_used: nonce, + message: engine_error.to_string(), + possibly_sent: false, + should_retry: false, + } + .fail()) + } + } + } + + async fn estimate_gas_fees( + &self, + chain: &impl Chain, + tx: AlloyTransactionRequest, + ) -> Result { + // Try EIP-1559 fees first, fall back to legacy if unsupported + match chain.provider().estimate_eip1559_fees().await { + Ok(eip1559_fees) => { + tracing::debug!( + "Using EIP-1559 fees: max_fee={}, max_priority_fee={}", + eip1559_fees.max_fee_per_gas, + eip1559_fees.max_priority_fee_per_gas + ); + Ok(tx + .with_max_fee_per_gas(eip1559_fees.max_fee_per_gas) + .with_max_priority_fee_per_gas(eip1559_fees.max_priority_fee_per_gas)) + } + Err(eip1559_error) => { + // Check if this is an "unsupported feature" error + if let RpcError::UnsupportedFeature(_) = &eip1559_error { + tracing::debug!("EIP-1559 not supported, falling back to legacy gas price"); + + // Fall back to legacy gas price + match chain.provider().get_gas_price().await { + Ok(gas_price) => { + // For legacy transactions, use the gas price + tracing::debug!("Using legacy gas price: {}", gas_price); + Ok(tx.with_gas_price(gas_price)) + } + Err(legacy_error) => Err(EoaSendError::FeeEstimationFailed { + message: format!("Failed to get legacy gas price: {}", legacy_error), + inner_error: legacy_error.to_engine_error(chain), + }), + } + } else { + // Other EIP-1559 error + Err(EoaSendError::FeeEstimationFailed { + message: format!("Failed to estimate EIP-1559 fees: {}", eip1559_error), + inner_error: eip1559_error.to_engine_error(chain), + }) + } + } + } + } +} diff --git a/executors/src/eoa/transaction_store.rs b/executors/src/eoa/transaction_store.rs new file mode 100644 index 0000000..670449f --- /dev/null +++ b/executors/src/eoa/transaction_store.rs @@ -0,0 +1,445 @@ +use alloy::consensus::Transaction; +use alloy::network::AnyTransactionReceipt; +use alloy::primitives::{Address, B256, U256}; +use alloy::rpc::types::{TransactionReceipt, TransactionRequest}; +use serde::{Deserialize, Serialize}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use thiserror::Error; +use twmq::redis::{AsyncCommands, Pipeline, aio::ConnectionManager}; + +#[derive(Debug, Error)] +pub enum TransactionStoreError { + #[error("Redis error: {0}")] + RedisError(#[from] twmq::redis::RedisError), + + #[error("Serialization error: {0}")] + SerializationError(#[from] serde_json::Error), + + #[error("Transaction not found: 
{transaction_id}")] + TransactionNotFound { transaction_id: String }, +} + +/// Initial transaction data from user request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TransactionData { + pub transaction_id: String, + pub eoa: Address, + pub chain_id: u64, + pub to: Option
+ pub value: U256,
+ pub data: Vec<u8>,
+ pub gas_limit: Option<u64>,
+ pub created_at: u64,
+}
+
+/// Active attempt for a transaction (full alloy transaction + metadata)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ActiveAttempt {
+ pub transaction_id: String,
+ pub nonce: U256,
+ pub transaction_hash: B256,
+ pub alloy_transaction: TransactionRequest, // Full serializable alloy transaction
+ pub sent_at: u64,
+ pub attempt_number: u32,
+}
+
+impl ActiveAttempt {
+ /// Get the queue job ID for this attempt (includes attempt number)
+ pub fn queue_job_id(&self) -> String {
+ format!("{}_{}", self.transaction_id, self.attempt_number)
+ }
+}
+
+/// Confirmation data for a successful transaction
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConfirmationData {
+ pub transaction_hash: B256,
+ pub confirmed_at: u64,
+ pub receipt: AnyTransactionReceipt,
+}
+
+/// Transaction store focused on transaction_id operations and nonce indexing
+pub struct TransactionStore {
+ pub redis: ConnectionManager,
+ pub namespace: Option<String>,
+}
+
+impl TransactionStore {
+ pub fn new(redis: ConnectionManager, namespace: Option<String>) -> Self {
+ Self { redis, namespace }
+ }
+
+ // Redis key methods
+ fn transaction_data_key(&self, transaction_id: &str) -> String {
+ match &self.namespace {
+ Some(ns) => format!("{}:eoa_tx_data:{}", ns, transaction_id),
+ None => format!("eoa_tx_data:{}", transaction_id),
+ }
+ }
+
+ fn active_attempt_key(&self, transaction_id: &str) -> String {
+ match &self.namespace {
+ Some(ns) => format!("{}:eoa_active_attempt:{}", ns, transaction_id),
+ None => format!("eoa_active_attempt:{}", transaction_id),
+ }
+ }
+
+ fn nonce_to_transactions_key(&self, eoa: Address, chain_id: u64, nonce: U256) -> String {
+ match &self.namespace {
+ Some(ns) => format!("{}:eoa_nonce_to_txs:{}:{}:{}", ns, chain_id, eoa, nonce),
+ None => format!("eoa_nonce_to_txs:{}:{}:{}", chain_id, eoa, nonce),
+ }
+ }
+
+ fn eoa_active_transactions_key(&self, eoa: Address, chain_id: u64) -> String {
+ match &self.namespace {
+ Some(ns) => format!("{}:eoa_active_txs:{}:{}", ns, chain_id, eoa),
+ None => format!("eoa_active_txs:{}:{}", chain_id, eoa),
+ }
+ }
+
+ fn confirmation_key(&self, transaction_id: &str) -> String {
+ match &self.namespace {
+ Some(ns) => format!("{}:eoa_confirmation:{}", ns, transaction_id),
+ None => format!("eoa_confirmation:{}", transaction_id),
+ }
+ }
+
+ fn attempt_counter_key(&self, transaction_id: &str) -> String {
+ match &self.namespace {
+ Some(ns) => format!("{}:eoa_attempt_counter:{}", ns, transaction_id),
+ None => format!("eoa_attempt_counter:{}", transaction_id),
+ }
+ }
+
+ /// Store initial transaction data
+ pub async fn store_transaction_data(
+ &self,
+ transaction_data: &TransactionData,
+ ) -> Result<(), TransactionStoreError> {
+ let mut conn = self.redis.clone();
+ let data_key = self.transaction_data_key(&transaction_data.transaction_id);
+ let active_key =
+ self.eoa_active_transactions_key(transaction_data.eoa, transaction_data.chain_id);
+
+ let data_json = serde_json::to_string(transaction_data)?;
+
+ // Store transaction data
+ let _: () = conn.set(&data_key, data_json).await?;
+
+ // Add to active transactions set
+ let _: () = conn
+ .sadd(&active_key, &transaction_data.transaction_id)
+ .await?;
+
+ Ok(())
+ }
+
+ /// Get initial transaction data by transaction ID
+ pub async fn get_transaction_data(
+ &self,
+ transaction_id: &str,
+ ) -> Result<Option<TransactionData>, TransactionStoreError> {
+ let mut conn = self.redis.clone();
+ let data_key = self.transaction_data_key(transaction_id);
+
+ let data_json: Option<String> = conn.get(&data_key).await?;
+
+ match data_json {
+ Some(json) => {
+ let data: TransactionData = serde_json::from_str(&json)?;
+ Ok(Some(data))
+ }
+ None => Ok(None),
+ }
+ }
+
+ /// Add/update active attempt for a transaction
+ pub async fn add_active_attempt(
+ &self,
+ attempt: &ActiveAttempt,
+ ) -> Result<(), TransactionStoreError> {
+ let mut conn = self.redis.clone();
+ let attempt_key = self.active_attempt_key(&attempt.transaction_id);
+
+ // Get transaction data to determine EOA and chain_id for indexing
+ let tx_data = self
+ .get_transaction_data(&attempt.transaction_id)
+ .await?
+ .ok_or_else(|| TransactionStoreError::TransactionNotFound {
+ transaction_id: attempt.transaction_id.clone(),
+ })?;
+
+ let nonce_key =
+ self.nonce_to_transactions_key(tx_data.eoa, tx_data.chain_id, attempt.nonce);
+ let counter_key = self.attempt_counter_key(&attempt.transaction_id);
+
+ let attempt_json = serde_json::to_string(attempt)?;
+
+ // Store active attempt
+ let _: () = conn.set(&attempt_key, attempt_json).await?;
+
+ // Index by nonce (multiple transactions can compete for same nonce)
+ let _: () = conn.sadd(&nonce_key, &attempt.transaction_id).await?;
+
+ // Increment attempt counter for observability
+ let _: () = conn.incr(&counter_key, 1).await?;
+
+ Ok(())
+ }
+
+ /// Get active attempt for a transaction
+ pub async fn get_active_attempt(
+ &self,
+ transaction_id: &str,
+ ) -> Result<Option<ActiveAttempt>, TransactionStoreError> {
+ let mut conn = self.redis.clone();
+ let attempt_key = self.active_attempt_key(transaction_id);
+
+ let attempt_json: Option<String> = conn.get(&attempt_key).await?;
+
+ match attempt_json {
+ Some(json) => {
+ let attempt: ActiveAttempt = serde_json::from_str(&json)?;
+ Ok(Some(attempt))
+ }
+ None => Ok(None),
+ }
+ }
+
+ /// Get all transaction IDs competing for a specific nonce
+ pub async fn get_transactions_by_nonce(
+ &self,
+ eoa: Address,
+ chain_id: u64,
+ nonce: U256,
+ ) -> Result<Vec<String>, TransactionStoreError> {
+ let mut conn = self.redis.clone();
+ let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, nonce);
+
+ let transaction_ids: Vec<String> = conn.smembers(&nonce_key).await?;
+ Ok(transaction_ids)
+ }
+
+ /// Get all active transaction IDs for an EOA
+ pub async fn get_active_transactions(
+ &self,
+ eoa: Address,
+ chain_id: u64,
+ ) -> Result<Vec<String>, TransactionStoreError> {
+ let mut conn = self.redis.clone();
+ let active_key = self.eoa_active_transactions_key(eoa, chain_id);
+
+ let transaction_ids: Vec<String> = conn.smembers(&active_key).await?;
+ Ok(transaction_ids)
+ }
+
+ /// Get all sent transactions (those with active attempts) for an EOA
+ pub async fn get_sent_transactions(
+ &self,
+ eoa: Address,
+ chain_id: u64,
+ ) -> Result<Vec<(String, ActiveAttempt)>, TransactionStoreError> {
+ let transaction_ids = self.get_active_transactions(eoa, chain_id).await?;
+
+ let mut sent_transactions = Vec::new();
+ for transaction_id in transaction_ids {
+ if let Some(attempt) = self.get_active_attempt(&transaction_id).await?
{ + sent_transactions.push((transaction_id, attempt)); + } + } + + Ok(sent_transactions) + } + + /// Mark transaction as confirmed and clean up + pub async fn mark_transaction_confirmed( + &self, + transaction_id: &str, + confirmation_data: &ConfirmationData, + ) -> Result<(), TransactionStoreError> { + let mut conn = self.redis.clone(); + let confirmation_key = self.confirmation_key(transaction_id); + + // Get transaction data to determine EOA and chain_id + let tx_data = self + .get_transaction_data(transaction_id) + .await? + .ok_or_else(|| TransactionStoreError::TransactionNotFound { + transaction_id: transaction_id.to_string(), + })?; + + let active_key = self.eoa_active_transactions_key(tx_data.eoa, tx_data.chain_id); + let attempt_key = self.active_attempt_key(transaction_id); + + // Get current attempt to clean up nonce index + if let Some(attempt) = self.get_active_attempt(transaction_id).await? { + let nonce_key = + self.nonce_to_transactions_key(tx_data.eoa, tx_data.chain_id, attempt.nonce); + let _: () = conn.srem(&nonce_key, transaction_id).await?; + } + + // Store confirmation data + let confirmation_json = serde_json::to_string(confirmation_data)?; + let _: () = conn.set(&confirmation_key, confirmation_json).await?; + + // Remove from active set + let _: () = conn.srem(&active_key, transaction_id).await?; + + // Remove active attempt + let _: () = conn.del(&attempt_key).await?; + + Ok(()) + } + + /// Mark transaction as failed and clean up + pub async fn mark_transaction_failed( + &self, + transaction_id: &str, + error_message: &str, + ) -> Result<(), TransactionStoreError> { + let mut conn = self.redis.clone(); + + // Get transaction data to determine EOA and chain_id + let tx_data = self + .get_transaction_data(transaction_id) + .await? + .ok_or_else(|| TransactionStoreError::TransactionNotFound { + transaction_id: transaction_id.to_string(), + })?; + + let active_key = self.eoa_active_transactions_key(tx_data.eoa, tx_data.chain_id); + let attempt_key = self.active_attempt_key(transaction_id); + + // Get current attempt to clean up nonce index + if let Some(attempt) = self.get_active_attempt(transaction_id).await? { + let nonce_key = + self.nonce_to_transactions_key(tx_data.eoa, tx_data.chain_id, attempt.nonce); + let _: () = conn.srem(&nonce_key, transaction_id).await?; + } + + // Remove from active set + let _: () = conn.srem(&active_key, transaction_id).await?; + + // Remove active attempt + let _: () = conn.del(&attempt_key).await?; + + Ok(()) + } + + /// Remove active attempt (for requeuing after race loss) + pub async fn remove_active_attempt( + &self, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + let mut conn = self.redis.clone(); + let attempt_key = self.active_attempt_key(transaction_id); + + // Get current attempt to clean up nonce index + if let Some(attempt) = self.get_active_attempt(transaction_id).await? { + let tx_data = self + .get_transaction_data(transaction_id) + .await? 
+ .ok_or_else(|| TransactionStoreError::TransactionNotFound { + transaction_id: transaction_id.to_string(), + })?; + + let nonce_key = + self.nonce_to_transactions_key(tx_data.eoa, tx_data.chain_id, attempt.nonce); + let _: () = conn.srem(&nonce_key, transaction_id).await?; + } + + // Remove active attempt (transaction stays in active set for requeuing) + let _: () = conn.del(&attempt_key).await?; + + Ok(()) + } + + /// Pipeline commands for atomic operations in hooks + pub fn add_store_transaction_command( + &self, + pipeline: &mut Pipeline, + transaction_data: &TransactionData, + ) { + let data_key = self.transaction_data_key(&transaction_data.transaction_id); + let active_key = + self.eoa_active_transactions_key(transaction_data.eoa, transaction_data.chain_id); + + let data_json = serde_json::to_string(transaction_data).unwrap(); + + pipeline.set(&data_key, data_json); + pipeline.sadd(&active_key, &transaction_data.transaction_id); + } + + pub fn add_active_attempt_command( + &self, + pipeline: &mut Pipeline, + attempt: &ActiveAttempt, + eoa: Address, + chain_id: u64, + ) { + let attempt_key = self.active_attempt_key(&attempt.transaction_id); + let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, attempt.nonce); + let counter_key = self.attempt_counter_key(&attempt.transaction_id); + + let attempt_json = serde_json::to_string(attempt).unwrap(); + + pipeline.set(&attempt_key, attempt_json); + pipeline.sadd(&nonce_key, &attempt.transaction_id); + pipeline.incr(&counter_key, 1); + } + + pub fn add_remove_active_attempt_command( + &self, + pipeline: &mut Pipeline, + transaction_id: &str, + eoa: Address, + chain_id: u64, + nonce: U256, + ) { + let attempt_key = self.active_attempt_key(transaction_id); + let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, nonce); + + pipeline.del(&attempt_key); + pipeline.srem(&nonce_key, transaction_id); + } + + pub fn add_mark_confirmed_command( + &self, + pipeline: &mut Pipeline, + transaction_id: &str, + confirmation_data: &ConfirmationData, + eoa: Address, + chain_id: u64, + nonce: U256, + ) { + let confirmation_key = self.confirmation_key(transaction_id); + let active_key = self.eoa_active_transactions_key(eoa, chain_id); + let attempt_key = self.active_attempt_key(transaction_id); + let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, nonce); + + let confirmation_json = serde_json::to_string(confirmation_data).unwrap(); + + pipeline.set(&confirmation_key, confirmation_json); + pipeline.srem(&active_key, transaction_id); + pipeline.del(&attempt_key); + pipeline.srem(&nonce_key, transaction_id); + } + + pub fn add_mark_failed_command( + &self, + pipeline: &mut Pipeline, + transaction_id: &str, + eoa: Address, + chain_id: u64, + nonce: U256, + ) { + let active_key = self.eoa_active_transactions_key(eoa, chain_id); + let attempt_key = self.active_attempt_key(transaction_id); + let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, nonce); + + pipeline.srem(&active_key, transaction_id); + pipeline.del(&attempt_key); + pipeline.srem(&nonce_key, transaction_id); + } +} diff --git a/executors/src/lib.rs b/executors/src/lib.rs index 0e9dd3e..5d9c5b3 100644 --- a/executors/src/lib.rs +++ b/executors/src/lib.rs @@ -1,3 +1,4 @@ +pub mod eoa; pub mod external_bundler; -pub mod webhook; pub mod transaction_registry; +pub mod webhook; From c3263e4e7c2ff72f70956926f8590f1af69b4dfc Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Thu, 3 Jul 2025 18:32:24 +0530 Subject: [PATCH 3/8] wip --- executors/src/eoa/confirm.rs | 592 
------------------- executors/src/eoa/eoa_confirmation_worker.rs | 2 +- executors/src/eoa/mod.rs | 4 +- executors/src/eoa/send.rs | 50 +- 4 files changed, 40 insertions(+), 608 deletions(-) delete mode 100644 executors/src/eoa/confirm.rs diff --git a/executors/src/eoa/confirm.rs b/executors/src/eoa/confirm.rs deleted file mode 100644 index 8eefb79..0000000 --- a/executors/src/eoa/confirm.rs +++ /dev/null @@ -1,592 +0,0 @@ -use alloy::primitives::{Address, B256, U256}; -use alloy::providers::Provider; -use engine_core::{ - chain::{Chain, ChainService, RpcCredentials}, - error::{AlloyRpcErrorToEngineError, EngineError, RpcErrorKind}, - execution_options::WebhookOptions, -}; -use serde::{Deserialize, Serialize}; -use std::{sync::Arc, time::Duration}; -use twmq::{ - FailHookData, NackHookData, Queue, SuccessHookData, UserCancellable, - error::TwmqError, - hooks::TransactionContext, - job::{BorrowedJob, DelayOptions, JobResult, RequeuePosition, ToJobError, ToJobResult}, -}; - -use crate::{ - transaction_registry::TransactionRegistry, - webhook::{ - WebhookJobHandler, - envelope::{ExecutorStage, HasTransactionMetadata, HasWebhookOptions, WebhookCapable}, - }, -}; - -use super::{ - nonce_manager::NonceManager, - send::{EoaSendHandler, EoaSendJobData}, -}; - -// --- Job Payload --- -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaConfirmationJobData { - pub transaction_id: String, - pub chain_id: u64, - pub eoa_address: Address, - pub nonce: U256, - pub transaction_hash: B256, - pub webhook_options: Option>, - pub rpc_credentials: RpcCredentials, -} - -impl HasWebhookOptions for EoaConfirmationJobData { - fn webhook_options(&self) -> Option> { - self.webhook_options.clone() - } -} - -impl HasTransactionMetadata for EoaConfirmationJobData { - fn transaction_id(&self) -> String { - self.transaction_id.clone() - } -} - -// --- Success Result --- -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaConfirmationResult { - pub transaction_hash: B256, - pub nonce_confirmed: U256, - pub block_number: U256, - pub block_hash: B256, - pub gas_used: U256, - pub effective_gas_price: U256, - pub status: bool, -} - -// --- Error Types --- -#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] -pub enum EoaConfirmationError { - #[error("Chain service error for chainId {chain_id}: {message}")] - ChainServiceError { chain_id: u64, message: String }, - - #[error("Transaction not yet confirmed")] - NotYetConfirmed, - - #[error("Transaction was replaced by another transaction")] - TransactionReplaced { - original_hash: B256, - replacing_hash: Option, - nonce: U256, - }, - - #[error("Transaction failed with revert")] - TransactionReverted { - transaction_hash: B256, - revert_reason: Option, - }, - - #[error("RPC error during confirmation: {message}")] - RpcError { message: String, retryable: bool }, - - #[error("Nonce conflict detected - multiple transactions for nonce {nonce}")] - NonceConflict { - nonce: U256, - competing_hashes: Vec, - }, - - #[error("Invalid RPC Credentials: {message}")] - InvalidRpcCredentials { message: String }, - - #[error("Internal error: {message}")] - InternalError { message: String }, - - #[error("Transaction cancelled by user")] - UserCancelled, -} - -impl From for EoaConfirmationError { - fn from(error: TwmqError) -> Self { - EoaConfirmationError::InternalError { - message: format!("Queue error: {}", error), - } - } 
-} - -impl UserCancellable for EoaConfirmationError { - fn user_cancelled() -> Self { - EoaConfirmationError::UserCancelled - } -} - -// --- Handler --- -pub struct EoaConfirmationHandler -where - CS: ChainService + Send + Sync + 'static, -{ - pub chain_service: Arc, - pub nonce_manager: Arc, - pub webhook_queue: Arc>, - pub send_queue: Arc>>, - pub transaction_registry: Arc, - pub max_quick_checks: u32, -} - -impl ExecutorStage for EoaConfirmationHandler -where - CS: ChainService + Send + Sync + 'static, -{ - fn executor_name() -> &'static str { - "eoa" - } - - fn stage_name() -> &'static str { - "confirm" - } -} - -impl WebhookCapable for EoaConfirmationHandler -where - CS: ChainService + Send + Sync + 'static, -{ - fn webhook_queue(&self) -> &Arc> { - &self.webhook_queue - } -} - -impl twmq::DurableExecution for EoaConfirmationHandler -where - CS: ChainService + Send + Sync + 'static, -{ - type Output = EoaConfirmationResult; - type ErrorData = EoaConfirmationError; - type JobData = EoaConfirmationJobData; - - #[tracing::instrument(skip(self, job), fields(transaction_id = job.job.id, stage = Self::stage_name(), executor = Self::executor_name()))] - async fn process( - &self, - job: &BorrowedJob, - ) -> JobResult { - let job_data = &job.job.data; - - // 1. Get Chain - let chain = self - .chain_service - .get_chain(job_data.chain_id) - .map_err(|e| EoaConfirmationError::ChainServiceError { - chain_id: job_data.chain_id, - message: format!("Failed to get chain instance: {}", e), - }) - .map_err_fail()?; - - let chain_auth_headers = job_data - .rpc_credentials - .to_header_map() - .map_err(|e| EoaConfirmationError::InvalidRpcCredentials { - message: e.to_string(), - }) - .map_err_fail()?; - - let chain = chain.with_new_default_headers(chain_auth_headers); - - // 2. Get current onchain nonce - let onchain_nonce = match chain - .provider() - .get_transaction_count(job_data.eoa_address) - .await - { - Ok(nonce) => nonce, - Err(e) => { - let engine_error = e.to_engine_error(&chain); - return Err(EoaConfirmationError::RpcError { - message: format!("Failed to get transaction count: {}", engine_error), - retryable: true, - } - .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)); - } - }; - - // 3. Get cached nonce to detect progression - let last_known_nonce = self - .nonce_manager - .get_cached_onchain_nonce(job_data.eoa_address, job_data.chain_id) - .await - .map_err(|e| EoaConfirmationError::InternalError { - message: format!("Failed to get cached nonce: {}", e), - }) - .map_err_fail()? - .unwrap_or(U256::ZERO); - - tracing::debug!( - nonce = %job_data.nonce, - onchain_nonce = %onchain_nonce, - last_known_nonce = %last_known_nonce, - "Checking confirmation status" - ); - - // 4. If nonce hasn't moved, check specific transaction - if onchain_nonce <= last_known_nonce { - return self.check_specific_transaction(job, &chain).await; - } - - // 5. Nonce moved! Check all in-flight transactions for this range - self.process_nonce_progression(job, &chain, last_known_nonce, onchain_nonce) - .await - } - - async fn on_success( - &self, - job: &BorrowedJob, - success_data: SuccessHookData<'_, Self::Output>, - tx: &mut TransactionContext<'_>, - ) { - // 1. Remove nonce assignment (transaction confirmed) - self.nonce_manager.add_remove_assignment_command( - tx.pipeline(), - job.job.data.eoa_address, - job.job.data.chain_id, - success_data.result.nonce_confirmed, - ); - - // 2. 
Update cached onchain nonce - let cached_nonce = success_data.result.nonce_confirmed + U256::from(1); - // TODO: Add method to update cached nonce via pipeline - - // 3. Remove from transaction registry (completed) - self.transaction_registry - .add_remove_command(tx.pipeline(), &job.job.data.transaction_id); - - // 4. Reset error counters on success - // TODO: Update health to reset consecutive errors - - if let Err(e) = self.queue_success_webhook(job, success_data, tx) { - tracing::error!( - transaction_id = %job.job.data.transaction_id, - error = %e, - "Failed to queue success webhook" - ); - } - } - - async fn on_nack( - &self, - job: &BorrowedJob, - nack_data: NackHookData<'_, Self::ErrorData>, - tx: &mut TransactionContext<'_>, - ) { - if let Err(e) = self.queue_nack_webhook(job, nack_data, tx) { - tracing::error!( - transaction_id = %job.job.data.transaction_id, - error = %e, - "Failed to queue nack webhook" - ); - } - } - - async fn on_fail( - &self, - job: &BorrowedJob, - fail_data: FailHookData<'_, Self::ErrorData>, - tx: &mut TransactionContext<'_>, - ) { - // Handle different failure types - match fail_data.error { - EoaConfirmationError::TransactionReplaced { nonce, .. } => { - // Transaction was replaced - requeue the original transaction - self.requeue_replaced_transaction(job, *nonce, tx).await; - } - EoaConfirmationError::TransactionReverted { .. } => { - // Transaction reverted - remove nonce assignment and don't requeue - self.nonce_manager.add_remove_assignment_command( - tx.pipeline(), - job.job.data.eoa_address, - job.job.data.chain_id, - job.job.data.nonce, - ); - } - _ => { - // Other failures - remove nonce assignment - self.nonce_manager.add_remove_assignment_command( - tx.pipeline(), - job.job.data.eoa_address, - job.job.data.chain_id, - job.job.data.nonce, - ); - } - } - - // Remove from transaction registry - self.transaction_registry - .add_remove_command(tx.pipeline(), &job.job.data.transaction_id); - - if let Err(e) = self.queue_fail_webhook(job, fail_data, tx) { - tracing::error!( - transaction_id = %job.job.data.transaction_id, - error = %e, - "Failed to queue fail webhook" - ); - } - } -} - -// --- Confirmation Logic --- - -impl EoaConfirmationHandler -where - CS: ChainService + Send + Sync + 'static, -{ - async fn check_specific_transaction( - &self, - job: &BorrowedJob, - chain: &impl Chain, - ) -> JobResult { - let job_data = &job.job.data; - - // Poll for specific transaction receipt - match chain - .provider() - .get_transaction_receipt(job_data.transaction_hash) - .await - { - Ok(Some(receipt)) => { - // Found receipt - check if transaction succeeded - if receipt.status() { - tracing::info!( - transaction_hash = %job_data.transaction_hash, - block_number = %receipt.block_number.unwrap_or_default(), - "Transaction confirmed successfully" - ); - - Ok(EoaConfirmationResult { - transaction_hash: job_data.transaction_hash, - nonce_confirmed: job_data.nonce, - block_number: receipt.block_number.unwrap_or_default(), - block_hash: receipt.block_hash.unwrap_or_default(), - gas_used: receipt.gas_used, - effective_gas_price: receipt.effective_gas_price.unwrap_or_default(), - status: true, - }) - } else { - // Transaction reverted - Err(EoaConfirmationError::TransactionReverted { - transaction_hash: job_data.transaction_hash, - revert_reason: None, // Could extract from logs if needed - } - .fail()) - } - } - Ok(None) => { - // No receipt yet - if job.job.attempts > self.max_quick_checks { - // After max quick checks, switch to slow path (longer delays) - 
Err(EoaConfirmationError::NotYetConfirmed - .nack(Some(Duration::from_secs(30)), RequeuePosition::Last)) - } else { - // Quick recheck - Err(EoaConfirmationError::NotYetConfirmed - .nack(Some(Duration::from_secs(3)), RequeuePosition::Last)) - } - } - Err(e) => { - let engine_error = e.to_engine_error(chain); - let retryable = matches!(engine_error, - EngineError::RpcError { kind: RpcErrorKind::OtherTransportError(_), .. } | - EngineError::RpcError { kind: RpcErrorKind::ErrorResp(resp), .. } if matches!(resp.code, -32005 | -32603) - ); - - if retryable { - Err(EoaConfirmationError::RpcError { - message: format!("RPC error getting receipt: {}", engine_error), - retryable: true, - } - .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)) - } else { - Err(EoaConfirmationError::RpcError { - message: format!("Failed to get receipt: {}", engine_error), - retryable: false, - } - .fail()) - } - } - } - } - - async fn process_nonce_progression( - &self, - job: &BorrowedJob, - chain: &impl Chain, - last_known_nonce: U256, - onchain_nonce: U256, - ) -> JobResult { - let job_data = &job.job.data; - - tracing::info!( - from_nonce = %last_known_nonce, - to_nonce = %onchain_nonce, - "Processing nonce progression" - ); - - // Check all nonces from last_known to current onchain - for nonce_to_check in last_known_nonce.to::()..onchain_nonce.to::() { - let nonce_u256 = U256::from(nonce_to_check); - - // Get all assignments for this nonce - let assignments = self - .nonce_manager - .get_nonce_assignments(job_data.eoa_address, job_data.chain_id, nonce_u256) - .await - .map_err(|e| EoaConfirmationError::InternalError { - message: format!("Failed to get nonce assignments: {}", e), - }) - .map_err_fail()?; - - for assignment in assignments { - match chain - .provider() - .get_transaction_receipt(assignment.transaction_hash) - .await - { - Ok(Some(receipt)) => { - // Found a receipt for this nonce - if assignment.transaction_id == job_data.transaction_id { - // This job won the race! 
- if receipt.status() { - tracing::info!( - transaction_hash = %assignment.transaction_hash, - nonce = %nonce_u256, - "Transaction confirmed in nonce progression" - ); - - // Update cached nonce - if let Err(e) = self - .nonce_manager - .update_cached_onchain_nonce( - job_data.eoa_address, - job_data.chain_id, - onchain_nonce, - ) - .await - { - tracing::error!("Failed to update cached nonce: {}", e); - } - - return Ok(EoaConfirmationResult { - transaction_hash: assignment.transaction_hash, - nonce_confirmed: nonce_u256, - block_number: receipt.block_number.unwrap_or_default(), - block_hash: receipt.block_hash.unwrap_or_default(), - gas_used: receipt.gas_used, - effective_gas_price: receipt - .effective_gas_price - .unwrap_or_default(), - status: true, - }); - } else { - // Transaction reverted - return Err(EoaConfirmationError::TransactionReverted { - transaction_hash: assignment.transaction_hash, - revert_reason: None, - } - .fail()); - } - } else { - // Different transaction won - will handle in requeue - tracing::info!( - winning_hash = %assignment.transaction_hash, - losing_transaction_id = %job_data.transaction_id, - nonce = %nonce_u256, - "Different transaction won nonce race" - ); - } - } - Ok(None) => { - // No receipt for this hash yet - if nonce_u256 < onchain_nonce - U256::from(1) { - // Old nonce with no receipt - transaction was likely replaced - tracing::warn!( - transaction_hash = %assignment.transaction_hash, - nonce = %nonce_u256, - "Old transaction with no receipt - likely replaced" - ); - } - } - Err(e) => { - tracing::error!( - transaction_hash = %assignment.transaction_hash, - nonce = %nonce_u256, - error = %e, - "Error getting receipt during nonce progression" - ); - } - } - } - } - - // Update cached nonce regardless - if let Err(e) = self - .nonce_manager - .update_cached_onchain_nonce(job_data.eoa_address, job_data.chain_id, onchain_nonce) - .await - { - tracing::error!("Failed to update cached nonce: {}", e); - } - - // If we get here, our transaction wasn't found in the confirmed range - if job_data.nonce < onchain_nonce { - // Our nonce is old and we didn't find a receipt - transaction was replaced - Err(EoaConfirmationError::TransactionReplaced { - original_hash: job_data.transaction_hash, - replacing_hash: None, - nonce: job_data.nonce, - } - .fail()) - } else { - // Our nonce is current or future - keep waiting - Err(EoaConfirmationError::NotYetConfirmed - .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)) - } - } - - async fn requeue_replaced_transaction( - &self, - job: &BorrowedJob, - nonce: U256, - tx: &mut TransactionContext<'_>, - ) { - tracing::info!( - transaction_id = %job.job.data.transaction_id, - nonce = %nonce, - "Requeuing replaced transaction" - ); - - // Create a new send job without assigned nonce (will get new nonce) - let requeue_job = self - .send_queue - .clone() - .job(EoaSendJobData { - transaction_id: job.job.data.transaction_id.clone(), - chain_id: job.job.data.chain_id, - from: job.job.data.eoa_address, - to: Address::ZERO, // TODO: Get original transaction details - value: U256::ZERO, - data: Default::default(), - gas_limit: None, - max_fee_per_gas: None, - max_priority_fee_per_gas: None, - assigned_nonce: None, // Will get new nonce - webhook_options: job.job.data.webhook_options.clone(), - rpc_credentials: job.job.data.rpc_credentials.clone(), - }) - .with_id(&format!("{}_retry", job.job.data.transaction_id)); - - if let Err(e) = tx.queue_job(requeue_job) { - tracing::error!( - transaction_id = 
%job.job.data.transaction_id, - error = %e, - "Failed to requeue replaced transaction" - ); - } - } -} diff --git a/executors/src/eoa/eoa_confirmation_worker.rs b/executors/src/eoa/eoa_confirmation_worker.rs index a8ebd4c..b7890d5 100644 --- a/executors/src/eoa/eoa_confirmation_worker.rs +++ b/executors/src/eoa/eoa_confirmation_worker.rs @@ -20,7 +20,7 @@ use crate::{ transaction_registry::TransactionRegistry, webhook::{ WebhookJobHandler, - envelope::{ExecutorStage, HasTransactionMetadata, HasWebhookOptions, WebhookCapable}, + envelope::{ExecutorStage, WebhookCapable}, }, }; diff --git a/executors/src/eoa/mod.rs b/executors/src/eoa/mod.rs index 262cb19..5cc0a01 100644 --- a/executors/src/eoa/mod.rs +++ b/executors/src/eoa/mod.rs @@ -1,4 +1,3 @@ -pub mod confirm; pub mod eoa_confirmation_worker; pub mod error_classifier; pub mod nonce_manager; @@ -9,7 +8,8 @@ pub use confirm::{ EoaConfirmationError, EoaConfirmationHandler, EoaConfirmationJobData, EoaConfirmationResult, }; pub use eoa_confirmation_worker::{ - EoaConfirmationWorker, EoaConfirmationWorkerError, EoaConfirmationWorkerJobData, EoaConfirmationWorkerResult, + EoaConfirmationWorker, EoaConfirmationWorkerError, EoaConfirmationWorkerJobData, + EoaConfirmationWorkerResult, }; pub use error_classifier::{EoaErrorMapper, EoaExecutionError, RecoveryStrategy}; pub use nonce_manager::{EoaHealth, NonceAssignment, NonceManager}; diff --git a/executors/src/eoa/send.rs b/executors/src/eoa/send.rs index 10ca37c..ac9614b 100644 --- a/executors/src/eoa/send.rs +++ b/executors/src/eoa/send.rs @@ -1,4 +1,4 @@ -use alloy::consensus::Transaction; +use alloy::consensus::{SignableTransaction, Transaction}; use alloy::network::TransactionBuilder; use alloy::primitives::{Address, B256, Bytes, U256}; use alloy::providers::Provider; @@ -30,7 +30,6 @@ use crate::{ }; use super::{ - confirm::{EoaConfirmationHandler, EoaConfirmationJobData}, error_classifier::{EoaErrorMapper, EoaExecutionError, RecoveryStrategy}, nonce_manager::NonceManager, }; @@ -73,9 +72,6 @@ impl HasTransactionMetadata for EoaSendJobData { pub struct EoaSendResult { pub transaction_hash: B256, pub nonce_used: u64, - pub gas_limit: U256, - pub max_fee_per_gas: U256, - pub max_priority_fee_per_gas: U256, pub possibly_duplicate: Option, } @@ -169,8 +165,8 @@ where pub chain_service: Arc, pub nonce_manager: Arc, pub webhook_queue: Arc>, - pub confirm_queue: Arc>>, pub transaction_registry: Arc, + pub eoa_signer: Arc, pub max_in_flight: u32, } @@ -297,16 +293,47 @@ where let max_fee_per_gas = final_tx.max_fee_per_gas(); let max_priority_fee_per_gas = final_tx.max_priority_fee_per_gas(); + // 7. Sign the transaction + let signing_options = EoaSigningOptions { + from: job_data.from, + chain_id: Some(job_data.chain_id), + }; + + let signature = self + .eoa_signer + .sign_transaction( + signing_options, + final_tx.clone(), + job_data.signing_credential.clone(), + ) + .await + .map_err(|e| EoaSendError::InternalError { + message: format!("Failed to sign transaction: {}", e), + }) + .map_err_fail()?; + + // 8. Create signed transaction envelope + let signed_tx = final_tx.into_signed( + signature + .parse() + .map_err(|e| EoaSendError::InternalError { + message: format!("Failed to parse signature: {}", e), + }) + .map_err_fail()?, + ); + tracing::debug!( nonce = %assigned_nonce, gas_limit = %gas_limit, max_fee_per_gas = %max_fee_per_gas, max_priority_fee_per_gas = ?max_priority_fee_per_gas, - "Sending transaction" + "Sending signed transaction" ); - // 7. 
Send transaction - match chain.provider().send_transaction(final_tx).await { + let pre_computed_hash = signed_tx.hash(); + + // 9. Send transaction + match chain.provider().send_tx_envelope(signed_tx.into()).await { Ok(pending_tx) => { let tx_hash = *pending_tx.tx_hash(); @@ -319,9 +346,6 @@ where Ok(EoaSendResult { transaction_hash: tx_hash, nonce_used: assigned_nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, possibly_duplicate: None, }) } @@ -671,7 +695,7 @@ where async fn handle_send_error( &self, send_error: RpcError, - nonce: U256, + nonce: u64, chain: &impl Chain, ) -> JobResult { // Try to map actionable errors, otherwise use engine error handling From 623b545c816c33e405ad6ce740b8e17e775e4fb3 Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Thu, 3 Jul 2025 19:59:01 +0530 Subject: [PATCH 4/8] wip --- executors/src/eoa/eoa_confirmation_worker.rs | 1 - executors/src/eoa/error_classifier.rs | 78 +++++++++++--------- executors/src/eoa/mod.rs | 3 - executors/src/eoa/send.rs | 46 ++++++------ executors/src/eoa/transaction_store.rs | 4 +- 5 files changed, 68 insertions(+), 64 deletions(-) diff --git a/executors/src/eoa/eoa_confirmation_worker.rs b/executors/src/eoa/eoa_confirmation_worker.rs index b7890d5..913316d 100644 --- a/executors/src/eoa/eoa_confirmation_worker.rs +++ b/executors/src/eoa/eoa_confirmation_worker.rs @@ -182,7 +182,6 @@ where message: format!("Failed to get cached nonce: {}", e), }) .map_err_fail()? - .map(|n| n.to::()) .unwrap_or(0); tracing::debug!( diff --git a/executors/src/eoa/error_classifier.rs b/executors/src/eoa/error_classifier.rs index 6ef2fa6..8dd1c80 100644 --- a/executors/src/eoa/error_classifier.rs +++ b/executors/src/eoa/error_classifier.rs @@ -11,28 +11,28 @@ use twmq::job::RequeuePosition; pub enum EoaExecutionError { /// Nonce too low - transaction might already be in mempool NonceTooLow { message: String }, - + /// Nonce too high - indicates nonce gap or desync NonceTooHigh { message: String }, - + /// Transaction already known in mempool AlreadyKnown { message: String }, - + /// Replacement transaction underpriced ReplacementUnderpriced { message: String }, - + /// Insufficient funds for transaction InsufficientFunds { message: String }, - + /// Gas-related error (limit, estimation, etc.) 
GasError { message: String }, - + /// Transaction pool is full or has limits PoolLimitExceeded { message: String }, - + /// Account does not exist or invalid AccountError { message: String }, - + /// Network/connectivity issues - use existing handling RpcError { message: String, @@ -65,9 +65,10 @@ impl EoaErrorMapper { chain: &C, ) -> Result { match error { - RpcError::ErrorResp(error_payload) => { - Ok(Self::map_ethereum_error(error_payload.code, &error_payload.message)) - } + RpcError::ErrorResp(error_payload) => Ok(Self::map_ethereum_error( + error_payload.code, + &error_payload.message, + )), _ => { // Use existing engine error handling for non-actionable errors Err(error.to_engine_error(chain)) @@ -78,7 +79,7 @@ impl EoaErrorMapper { /// Map Ethereum-specific errors that we need to act on fn map_ethereum_error(code: i64, message: &str) -> EoaExecutionError { let msg_lower = message.to_lowercase(); - + match code { -32000 => { // Only handle the specific ethereum errors we care about @@ -118,7 +119,9 @@ impl EoaErrorMapper { // Not an actionable error - let engine error handle it EoaExecutionError::RpcError { message: message.to_string(), - inner_error: Some(EngineError::InternalError { message: message.to_string() }), + inner_error: Some(EngineError::InternalError { + message: message.to_string(), + }), } } } @@ -126,7 +129,9 @@ impl EoaErrorMapper { // Not an actionable error code EoaExecutionError::RpcError { message: format!("RPC error code {}: {}", code, message), - inner_error: Some(EngineError::InternalError { message: message.to_string() }), + inner_error: Some(EngineError::InternalError { + message: message.to_string(), + }), } } } @@ -142,7 +147,7 @@ impl EoaErrorMapper { retryable: false, retry_delay: None, }, - + EoaExecutionError::NonceTooHigh { .. } => RecoveryStrategy { queue_confirmation: false, recycle_nonce: true, @@ -150,7 +155,7 @@ impl EoaErrorMapper { retryable: true, retry_delay: Some(Duration::from_secs(10)), }, - + EoaExecutionError::AlreadyKnown { .. } => RecoveryStrategy { queue_confirmation: true, recycle_nonce: false, @@ -158,7 +163,7 @@ impl EoaErrorMapper { retryable: false, retry_delay: None, }, - + EoaExecutionError::ReplacementUnderpriced { .. } => RecoveryStrategy { queue_confirmation: true, recycle_nonce: false, @@ -166,7 +171,7 @@ impl EoaErrorMapper { retryable: true, retry_delay: Some(Duration::from_secs(10)), }, - + EoaExecutionError::InsufficientFunds { .. } => RecoveryStrategy { queue_confirmation: false, recycle_nonce: true, @@ -174,7 +179,7 @@ impl EoaErrorMapper { retryable: true, retry_delay: Some(Duration::from_secs(60)), }, - + EoaExecutionError::GasError { .. } => RecoveryStrategy { queue_confirmation: false, recycle_nonce: true, @@ -182,7 +187,7 @@ impl EoaErrorMapper { retryable: true, retry_delay: Some(Duration::from_secs(30)), }, - + EoaExecutionError::PoolLimitExceeded { .. } => RecoveryStrategy { queue_confirmation: false, recycle_nonce: true, @@ -190,7 +195,7 @@ impl EoaErrorMapper { retryable: true, retry_delay: Some(Duration::from_secs(30)), }, - + EoaExecutionError::AccountError { .. } => RecoveryStrategy { queue_confirmation: false, recycle_nonce: true, @@ -198,7 +203,7 @@ impl EoaErrorMapper { retryable: false, retry_delay: None, }, - + EoaExecutionError::RpcError { .. 
} => { // This should not be used - let engine error handle it RecoveryStrategy { @@ -208,7 +213,7 @@ impl EoaErrorMapper { retryable: false, retry_delay: None, } - }, + } } } } @@ -229,7 +234,7 @@ impl EoaExecutionError { | EoaExecutionError::RpcError { message, .. } => message, } } - + /// Convert to appropriate job result for send operations pub fn to_send_job_result( &self, @@ -244,9 +249,11 @@ impl EoaExecutionError { Ok(success_factory()) } else if strategy.retryable { if let Some(delay) = strategy.retry_delay { - Err(error_factory(self.message().to_string()).nack(Some(delay), RequeuePosition::Last)) + Err(error_factory(self.message().to_string()) + .nack(Some(delay), RequeuePosition::Last)) } else { - Err(error_factory(self.message().to_string()).nack(Some(Duration::from_secs(5)), RequeuePosition::Last)) + Err(error_factory(self.message().to_string()) + .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)) } } else { // Permanent failure @@ -263,26 +270,29 @@ mod tests { fn test_nonce_too_low_mapping() { let error = EoaErrorMapper::map_ethereum_error(-32000, "nonce too low"); let strategy = EoaErrorMapper::get_recovery_strategy(&error); - + match error { EoaExecutionError::NonceTooLow { .. } => {} _ => panic!("Expected NonceTooLow error"), } - + assert!(strategy.queue_confirmation); assert!(!strategy.recycle_nonce); } #[test] fn test_insufficient_funds_mapping() { - let error = EoaErrorMapper::map_ethereum_error(-32000, "insufficient funds for gas * price + value"); + let error = EoaErrorMapper::map_ethereum_error( + -32000, + "insufficient funds for gas * price + value", + ); let strategy = EoaErrorMapper::get_recovery_strategy(&error); - + match error { EoaExecutionError::InsufficientFunds { .. } => {} _ => panic!("Expected InsufficientFunds error"), } - + assert!(!strategy.queue_confirmation); assert!(strategy.recycle_nonce); assert!(strategy.retryable); @@ -292,13 +302,13 @@ mod tests { fn test_already_known_mapping() { let error = EoaErrorMapper::map_ethereum_error(-32000, "already known"); let strategy = EoaErrorMapper::get_recovery_strategy(&error); - + match error { EoaExecutionError::AlreadyKnown { .. 
} => {} _ => panic!("Expected AlreadyKnown error"), } - + assert!(strategy.queue_confirmation); assert!(!strategy.recycle_nonce); } -} \ No newline at end of file +} diff --git a/executors/src/eoa/mod.rs b/executors/src/eoa/mod.rs index 5cc0a01..81a26e8 100644 --- a/executors/src/eoa/mod.rs +++ b/executors/src/eoa/mod.rs @@ -4,9 +4,6 @@ pub mod nonce_manager; pub mod send; pub mod transaction_store; -pub use confirm::{ - EoaConfirmationError, EoaConfirmationHandler, EoaConfirmationJobData, EoaConfirmationResult, -}; pub use eoa_confirmation_worker::{ EoaConfirmationWorker, EoaConfirmationWorkerError, EoaConfirmationWorkerJobData, EoaConfirmationWorkerResult, diff --git a/executors/src/eoa/send.rs b/executors/src/eoa/send.rs index ac9614b..6aa6c2a 100644 --- a/executors/src/eoa/send.rs +++ b/executors/src/eoa/send.rs @@ -21,6 +21,7 @@ use twmq::{ job::{BorrowedJob, DelayOptions, JobResult, RequeuePosition, ToJobError, ToJobResult}, }; +use crate::eoa::{EoaConfirmationWorker, EoaConfirmationWorkerJobData}; use crate::{ transaction_registry::TransactionRegistry, webhook::{ @@ -47,7 +48,6 @@ pub struct EoaSendJobData { pub webhook_options: Option>, pub assigned_nonce: Option, - pub gas_limit: Option, pub signing_credential: SigningCredential, @@ -83,12 +83,18 @@ pub enum EoaSendError { ChainServiceError { chain_id: u64, message: String }, #[error("Transaction simulation failed: {message}")] - SimulationFailed { + SimulationFailedWithRevert { message: String, revert_reason: Option, revert_data: Option, }, + #[error("Transaction simulation failed: {message}")] + SimulationFailed { + message: String, + inner_error: EngineError, + }, + #[error("Gas estimation failed: {message}")] GasEstimationFailed { message: String }, @@ -168,6 +174,7 @@ where pub transaction_registry: Arc, pub eoa_signer: Arc, pub max_in_flight: u32, + pub confirm_queue: Arc>>, } impl ExecutorStage for EoaSendHandler @@ -247,13 +254,14 @@ where if let Some(gas_limit) = job_data.gas_limit { tx_request = tx_request.with_gas_limit(gas_limit); } else { - match chain.provider().estimate_gas(tx_request).await { - Ok(gas) => { - let gas_with_buffer = gas * 110 / 100; // Add 10% buffer + let gas_limit_result = chain.provider().estimate_gas(tx_request.clone()).await; + match gas_limit_result { + Ok(gas_limit) => { + let gas_with_buffer = gas_limit * 110 / 100; // Add 10% buffer tx_request = tx_request.with_gas_limit(gas_with_buffer); } - Err(rpc_error) => { - return self.handle_simulation_error(rpc_error, &chain).await; + Err(e) => { + return self.handle_simulation_error(e, &chain); } } } @@ -383,14 +391,9 @@ where let confirm_job = self .confirm_queue .clone() - .job(EoaConfirmationJobData { - transaction_id: job.job.data.transaction_id.clone(), + .job(EoaConfirmationWorkerJobData { chain_id: job.job.data.chain_id, - eoa_address: job.job.data.from, - nonce: success_data.result.nonce_used, - transaction_hash: success_data.result.transaction_hash, - webhook_options: job.job.data.webhook_options.clone(), - rpc_credentials: job.job.data.rpc_credentials.clone(), + eoa: job.job.data.from, }) .with_id(&job.job.data.transaction_id) .with_delay(DelayOptions { @@ -508,21 +511,21 @@ impl EoaSendHandler where CS: ChainService + Send + Sync + 'static, { - async fn handle_simulation_error( + fn handle_simulation_error( &self, rpc_error: RpcError, chain: &impl Chain, ) -> JobResult { // Check if this is a revert first if let RpcError::ErrorResp(error_payload) = &rpc_error { - if error_payload.as_revert_data().is_some() { - return 
Err(EoaSendError::SimulationFailed { + if let Some(revert_data) = error_payload.as_revert_data() { + return Err(EoaSendError::SimulationFailedWithRevert { message: format!( "Transaction reverted during simulation: {}", error_payload.message ), - revert_reason: Some(error_payload.message.clone()), - revert_data: None, + revert_reason: Some(error_payload.message.to_string()), + revert_data: Some(revert_data), } .fail()); } @@ -539,16 +542,11 @@ where EoaSendResult { transaction_hash: B256::ZERO, nonce_used: U256::ZERO, - gas_limit: U256::ZERO, - max_fee_per_gas: U256::ZERO, - max_priority_fee_per_gas: U256::ZERO, possibly_duplicate: None, } }, |reason| EoaSendError::SimulationFailed { message: reason.clone(), - revert_reason: None, - revert_data: None, }, ) } diff --git a/executors/src/eoa/transaction_store.rs b/executors/src/eoa/transaction_store.rs index 670449f..b5e5330 100644 --- a/executors/src/eoa/transaction_store.rs +++ b/executors/src/eoa/transaction_store.rs @@ -1,4 +1,4 @@ -use alloy::consensus::Transaction; +use alloy::consensus::{Receipt, Transaction}; use alloy::network::AnyTransactionReceipt; use alloy::primitives::{Address, B256, U256}; use alloy::rpc::types::{TransactionReceipt, TransactionRequest}; @@ -55,7 +55,7 @@ impl ActiveAttempt { pub struct ConfirmationData { pub transaction_hash: B256, pub confirmed_at: u64, - pub receipt: AnyTransactionReceipt, + pub receipt: TransactionReceipt, } /// Transaction store focused on transaction_id operations and nonce indexing From 87affb44117c1aac13a2bd2b43742f57c04688f8 Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Sat, 5 Jul 2025 04:28:37 +0530 Subject: [PATCH 5/8] eoa worker changes --- Cargo.lock | 2 + executors/Cargo.toml | 4 +- executors/src/eoa/eoa_confirmation_worker.rs | 574 ------ executors/src/eoa/mod.rs | 16 +- executors/src/eoa/nonce_manager.rs | 550 ------ executors/src/eoa/send.rs | 805 -------- executors/src/eoa/store.rs | 1792 ++++++++++++++++++ executors/src/eoa/transaction_store.rs | 445 ----- executors/src/eoa/worker.rs | 986 ++++++++++ twmq/src/job.rs | 15 +- 10 files changed, 2794 insertions(+), 2395 deletions(-) delete mode 100644 executors/src/eoa/eoa_confirmation_worker.rs delete mode 100644 executors/src/eoa/nonce_manager.rs delete mode 100644 executors/src/eoa/send.rs create mode 100644 executors/src/eoa/store.rs delete mode 100644 executors/src/eoa/transaction_store.rs create mode 100644 executors/src/eoa/worker.rs diff --git a/Cargo.lock b/Cargo.lock index 4440c46..021ef5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2442,6 +2442,7 @@ dependencies = [ "engine-aa-core", "engine-aa-types", "engine-core", + "futures", "hex", "hmac", "rand 0.9.1", @@ -2450,6 +2451,7 @@ dependencies = [ "serde_json", "sha2", "thiserror 2.0.12", + "tokio", "tracing", "twmq", "uuid", diff --git a/executors/Cargo.toml b/executors/Cargo.toml index ecd9b04..2148d9e 100644 --- a/executors/Cargo.toml +++ b/executors/Cargo.toml @@ -19,4 +19,6 @@ engine-core = { version = "0.1.0", path = "../core" } engine-aa-core = { version = "0.1.0", path = "../aa-core" } rand = "0.9.1" uuid = { version = "1.17.0", features = ["v4"] } -chrono = "0.4.41" \ No newline at end of file +chrono = "0.4.41" +tokio = { version = "1.45.0", features = ["full"] } +futures = "0.3.31" diff --git a/executors/src/eoa/eoa_confirmation_worker.rs b/executors/src/eoa/eoa_confirmation_worker.rs deleted file mode 100644 index 913316d..0000000 --- a/executors/src/eoa/eoa_confirmation_worker.rs +++ /dev/null @@ -1,574 +0,0 @@ -use alloy::primitives::{Address, B256, 
U256}; -use alloy::providers::Provider; -use alloy::rpc::types::{BlockNumberOrTag, TransactionReceipt}; -use alloy::transports::{RpcError, TransportErrorKind}; -use engine_core::{ - chain::{Chain, ChainService, RpcCredentials}, - error::{AlloyRpcErrorToEngineError, EngineError, RpcErrorKind}, - execution_options::WebhookOptions, -}; -use serde::{Deserialize, Serialize}; -use std::{sync::Arc, time::Duration}; -use twmq::{ - FailHookData, NackHookData, Queue, SuccessHookData, UserCancellable, - error::TwmqError, - hooks::TransactionContext, - job::{BorrowedJob, DelayOptions, JobResult, RequeuePosition, ToJobError, ToJobResult}, -}; - -use crate::{ - transaction_registry::TransactionRegistry, - webhook::{ - WebhookJobHandler, - envelope::{ExecutorStage, WebhookCapable}, - }, -}; - -use super::{ - nonce_manager::NonceManager, - send::{EoaSendHandler, EoaSendJobData}, - transaction_store::{ActiveAttempt, ConfirmationData, TransactionStore}, -}; - -// --- Job Payload --- -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaConfirmationWorkerJobData { - pub eoa: Address, - pub chain_id: u64, - pub rpc_credentials: RpcCredentials, -} - -// --- Success Result --- -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaConfirmationWorkerResult { - pub eoa: Address, - pub chain_id: u64, - pub nonce_progression: Option<(u64, u64)>, // (from, to) - pub transactions_confirmed: u32, - pub transactions_requeued: u32, -} - -// --- Error Types --- -#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] -pub enum EoaConfirmationWorkerError { - #[error("Chain service error for chainId {chain_id}: {message}")] - ChainServiceError { chain_id: u64, message: String }, - - #[error("RPC error: {message}")] - RpcError { message: String, retryable: bool }, - - #[error("Active transactions found - will continue monitoring")] - ActiveTransactionsFound { count: u32 }, - - #[error("Sync required for EOA {eoa}")] - SyncRequired { eoa: Address }, - - #[error("Invalid RPC Credentials: {message}")] - InvalidRpcCredentials { message: String }, - - #[error("Internal error: {message}")] - InternalError { message: String }, - - #[error("Worker cancelled by user")] - UserCancelled, -} - -impl From for EoaConfirmationWorkerError { - fn from(error: TwmqError) -> Self { - EoaConfirmationWorkerError::InternalError { - message: format!("Queue error: {}", error), - } - } -} - -impl UserCancellable for EoaConfirmationWorkerError { - fn user_cancelled() -> Self { - EoaConfirmationWorkerError::UserCancelled - } -} - -// --- Handler --- -pub struct EoaConfirmationWorker -where - CS: ChainService + Send + Sync + 'static, -{ - pub chain_service: Arc, - pub nonce_manager: Arc, - pub transaction_store: Arc, - pub send_queue: Arc>>, - pub webhook_queue: Arc>, - pub transaction_registry: Arc, -} - -impl ExecutorStage for EoaConfirmationWorker -where - CS: ChainService + Send + Sync + 'static, -{ - fn executor_name() -> &'static str { - "eoa" - } - - fn stage_name() -> &'static str { - "confirmation_worker" - } -} - -impl WebhookCapable for EoaConfirmationWorker -where - CS: ChainService + Send + Sync + 'static, -{ - fn webhook_queue(&self) -> &Arc> { - &self.webhook_queue - } -} - -impl twmq::DurableExecution for EoaConfirmationWorker -where - CS: ChainService + Send + Sync + 'static, -{ - type Output = EoaConfirmationWorkerResult; - type ErrorData = EoaConfirmationWorkerError; - 
type JobData = EoaConfirmationWorkerJobData; - - #[tracing::instrument(skip(self, job), fields(eoa = %job.job.data.eoa, chain_id = job.job.data.chain_id, stage = Self::stage_name(), executor = Self::executor_name()))] - async fn process( - &self, - job: &BorrowedJob, - ) -> JobResult { - let job_data = &job.job.data; - - // 1. Get Chain - let chain = self - .chain_service - .get_chain(job_data.chain_id) - .map_err(|e| EoaConfirmationWorkerError::ChainServiceError { - chain_id: job_data.chain_id, - message: format!("Failed to get chain instance: {}", e), - }) - .map_err_fail()?; - - let chain_auth_headers = job_data - .rpc_credentials - .to_header_map() - .map_err(|e| EoaConfirmationWorkerError::InvalidRpcCredentials { - message: e.to_string(), - }) - .map_err_fail()?; - - let chain = chain.with_new_default_headers(chain_auth_headers); - - // 2. Get current onchain nonce - let onchain_nonce = match chain.provider().get_transaction_count(job_data.eoa).await { - Ok(nonce) => nonce, - Err(e) => { - let engine_error = e.to_engine_error(&chain); - return Err(EoaConfirmationWorkerError::RpcError { - message: format!("Failed to get transaction count: {}", engine_error), - retryable: true, - } - .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)); - } - }; - - // 3. Get cached onchain nonce to detect progression - let cached_nonce = self - .nonce_manager - .get_cached_onchain_nonce(job_data.eoa, job_data.chain_id) - .await - .map_err(|e| EoaConfirmationWorkerError::InternalError { - message: format!("Failed to get cached nonce: {}", e), - }) - .map_err_fail()? - .unwrap_or(0); - - tracing::debug!( - eoa = %job_data.eoa, - onchain_nonce = %onchain_nonce, - cached_nonce = %cached_nonce, - "Checking nonce progression" - ); - - // 4. Check if nonce has progressed - let nonce_progression = if onchain_nonce > cached_nonce { - Some((cached_nonce, onchain_nonce)) - } else { - None - }; - - // 5. Get all active transactions for this EOA - let active_transaction_ids = self - .transaction_store - .get_active_transactions(job_data.eoa, job_data.chain_id) - .await - .map_err(|e| EoaConfirmationWorkerError::InternalError { - message: format!("Failed to get active transactions: {}", e), - }) - .map_err_fail()?; - - // 6. If no active transactions and no progression, we're done - if active_transaction_ids.is_empty() && nonce_progression.is_none() { - tracing::debug!( - eoa = %job_data.eoa, - "No active transactions and no nonce progression - stopping worker" - ); - - return Ok(EoaConfirmationWorkerResult { - eoa: job_data.eoa, - chain_id: job_data.chain_id, - nonce_progression: None, - transactions_confirmed: 0, - transactions_requeued: 0, - }); - } - - // 7. Process any nonce progression - let mut transactions_confirmed = 0; - let mut transactions_requeued = 0; - - if let Some((from_nonce, to_nonce)) = nonce_progression { - tracing::info!( - eoa = %job_data.eoa, - from_nonce = %from_nonce, - to_nonce = %to_nonce, - "Processing nonce progression" - ); - - let (confirmed, requeued) = self - .process_nonce_progression(job_data, &chain, from_nonce, to_nonce) - .await - .map_err_fail()?; - - transactions_confirmed += confirmed; - transactions_requeued += requeued; - - // Update cached nonce - if let Err(e) = self - .nonce_manager - .update_cached_onchain_nonce(job_data.eoa, job_data.chain_id, onchain_nonce) - .await - { - tracing::error!( - eoa = %job_data.eoa, - error = %e, - "Failed to update cached nonce" - ); - } - } - - // 8. 
Check if we still have active transactions - if so, requeue worker - let remaining_active = self - .transaction_store - .get_active_transactions(job_data.eoa, job_data.chain_id) - .await - .map_err(|e| EoaConfirmationWorkerError::InternalError { - message: format!("Failed to get remaining active transactions: {}", e), - }) - .map_err_fail()?; - - if !remaining_active.is_empty() { - tracing::debug!( - eoa = %job_data.eoa, - active_count = remaining_active.len(), - "Active transactions found - requeuing worker" - ); - - return Err(EoaConfirmationWorkerError::ActiveTransactionsFound { - count: remaining_active.len() as u32, - } - .nack(Some(Duration::from_secs(3)), RequeuePosition::Last)); - } - - // 9. No more active transactions - worker can complete - Ok(EoaConfirmationWorkerResult { - eoa: job_data.eoa, - chain_id: job_data.chain_id, - nonce_progression, - transactions_confirmed, - transactions_requeued, - }) - } - - async fn on_success( - &self, - job: &BorrowedJob, - success_data: SuccessHookData<'_, Self::Output>, - tx: &mut TransactionContext<'_>, - ) { - tracing::info!( - eoa = %job.job.data.eoa, - chain_id = job.job.data.chain_id, - transactions_confirmed = success_data.result.transactions_confirmed, - transactions_requeued = success_data.result.transactions_requeued, - "EOA confirmation worker completed" - ); - } - - async fn on_nack( - &self, - job: &BorrowedJob, - nack_data: NackHookData<'_, Self::ErrorData>, - tx: &mut TransactionContext<'_>, - ) { - tracing::debug!( - eoa = %job.job.data.eoa, - chain_id = job.job.data.chain_id, - error = ?nack_data.error, - "EOA confirmation worker nacked - will retry" - ); - } - - async fn on_fail( - &self, - job: &BorrowedJob, - fail_data: FailHookData<'_, Self::ErrorData>, - tx: &mut TransactionContext<'_>, - ) { - tracing::error!( - eoa = %job.job.data.eoa, - chain_id = job.job.data.chain_id, - error = ?fail_data.error, - "EOA confirmation worker failed permanently" - ); - } -} - -// --- Core Logic --- - -impl EoaConfirmationWorker -where - CS: ChainService + Send + Sync + 'static, -{ - /// Process nonce progression and determine winners/losers - async fn process_nonce_progression( - &self, - job_data: &EoaConfirmationWorkerJobData, - chain: &impl Chain, - from_nonce: u64, - to_nonce: u64, - ) -> Result<(u32, u32), EoaConfirmationWorkerError> { - let mut transactions_confirmed = 0; - let mut transactions_requeued = 0; - - // Process each nonce from cached to current onchain - for nonce in from_nonce..to_nonce { - let nonce_u256 = U256::from(nonce); - - // Get all our transactions competing for this nonce - let competing_transaction_ids = self - .transaction_store - .get_transactions_by_nonce(job_data.eoa, job_data.chain_id, nonce_u256) - .await - .map_err(|e| EoaConfirmationWorkerError::InternalError { - message: format!("Failed to get transactions by nonce {}: {}", nonce, e), - })?; - - if competing_transaction_ids.is_empty() { - tracing::debug!( - nonce = %nonce, - "No competing transactions for nonce - chain progressed without us" - ); - continue; - } - - tracing::debug!( - nonce = %nonce, - competing_count = competing_transaction_ids.len(), - "Processing competing transactions for nonce" - ); - - // Check each competing transaction - let mut found_winner = false; - for transaction_id in &competing_transaction_ids { - if let Some(attempt) = self - .transaction_store - .get_active_attempt(transaction_id) - .await - .map_err(|e| EoaConfirmationWorkerError::InternalError { - message: format!( - "Failed to get active attempt for {}: {}", - 
transaction_id, e - ), - })? - { - // Query receipt by hash - match chain - .provider() - .get_transaction_receipt(attempt.transaction_hash) - .await - { - Ok(Some(receipt)) => { - if receipt.status() { - // This transaction won! - tracing::info!( - transaction_id = %transaction_id, - transaction_hash = %attempt.transaction_hash, - nonce = %nonce, - block_number = %receipt.block_number.unwrap_or_default(), - "Transaction confirmed on-chain" - ); - - let confirmation_data = ConfirmationData { - transaction_hash: attempt.transaction_hash, - receipt, - confirmed_at: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(), - }; - - if let Err(e) = self - .transaction_store - .mark_transaction_confirmed(transaction_id, &confirmation_data) - .await - { - tracing::error!( - transaction_id = %transaction_id, - error = %e, - "Failed to mark transaction as confirmed" - ); - } - - transactions_confirmed += 1; - found_winner = true; - } else { - // Transaction reverted - tracing::warn!( - transaction_id = %transaction_id, - transaction_hash = %attempt.transaction_hash, - nonce = %nonce, - "Transaction reverted on-chain" - ); - - if let Err(e) = self - .transaction_store - .mark_transaction_failed(transaction_id, "Transaction reverted") - .await - { - tracing::error!( - transaction_id = %transaction_id, - error = %e, - "Failed to mark transaction as failed" - ); - } - } - } - Ok(None) => { - // No receipt - transaction might still be pending or was replaced - tracing::debug!( - transaction_id = %transaction_id, - transaction_hash = %attempt.transaction_hash, - nonce = %nonce, - "No receipt found for transaction" - ); - } - Err(e) => { - tracing::error!( - transaction_id = %transaction_id, - transaction_hash = %attempt.transaction_hash, - nonce = %nonce, - error = %e, - "Error getting receipt for transaction" - ); - } - } - } - } - - // If nonce progressed but none of our transactions won, they all lost - if !found_winner { - for transaction_id in &competing_transaction_ids { - tracing::info!( - transaction_id = %transaction_id, - nonce = %nonce, - "Transaction lost nonce race - requeuing" - ); - - // Remove active attempt and requeue as new send job - if let Err(e) = self - .transaction_store - .remove_active_attempt(transaction_id) - .await - { - tracing::error!( - transaction_id = %transaction_id, - error = %e, - "Failed to remove active attempt for requeue" - ); - continue; - } - - // Create new send job (attempt_number will be incremented) - if let Err(e) = self.requeue_transaction(job_data, transaction_id).await { - tracing::error!( - transaction_id = %transaction_id, - error = %e, - "Failed to requeue transaction" - ); - } else { - transactions_requeued += 1; - } - } - } - } - - Ok((transactions_confirmed, transactions_requeued)) - } - - /// Requeue a transaction that lost a nonce race - async fn requeue_transaction( - &self, - job_data: &EoaConfirmationWorkerJobData, - transaction_id: &str, - ) -> Result<(), EoaConfirmationWorkerError> { - // Get original transaction data - let tx_data = self - .transaction_store - .get_transaction_data(transaction_id) - .await - .map_err(|e| EoaConfirmationWorkerError::InternalError { - message: format!( - "Failed to get transaction data for {}: {}", - transaction_id, e - ), - })? 
- .ok_or_else(|| EoaConfirmationWorkerError::InternalError { - message: format!("Transaction data not found for {}", transaction_id), - })?; - - // Get current attempt number for new queue job ID - let mut conn = self.transaction_store.redis.clone(); - let counter_key = self.transaction_store.attempt_counter_key(transaction_id); - let attempt_number: u32 = conn.get(&counter_key).await.unwrap_or(0); - - // Create new send job with incremented attempt - let requeue_job = self - .send_queue - .clone() - .job(EoaSendJobData { - transaction_id: tx_data.transaction_id.clone(), - chain_id: tx_data.chain_id, - from: tx_data.eoa, - to: tx_data.to, - value: tx_data.value, - data: tx_data.data.into(), - webhook_options: None, // TODO: Get from original job if needed - assigned_nonce: None, // Will get new nonce - gas_limit: tx_data.gas_limit, - signing_credential: Default::default(), // TODO: Get from original job - rpc_credentials: job_data.rpc_credentials.clone(), - }) - .with_id(&format!("{}_{}", transaction_id, attempt_number)); - - // Queue the job (this would normally be done in a pipeline in the actual hook) - tracing::info!( - transaction_id = %transaction_id, - queue_job_id = %format!("{}_{}", transaction_id, attempt_number), - "Requeuing transaction after race loss" - ); - - Ok(()) - } -} diff --git a/executors/src/eoa/mod.rs b/executors/src/eoa/mod.rs index 81a26e8..0c7c17f 100644 --- a/executors/src/eoa/mod.rs +++ b/executors/src/eoa/mod.rs @@ -1,16 +1,4 @@ -pub mod eoa_confirmation_worker; pub mod error_classifier; -pub mod nonce_manager; -pub mod send; -pub mod transaction_store; - -pub use eoa_confirmation_worker::{ - EoaConfirmationWorker, EoaConfirmationWorkerError, EoaConfirmationWorkerJobData, - EoaConfirmationWorkerResult, -}; +pub mod store; +pub mod worker; pub use error_classifier::{EoaErrorMapper, EoaExecutionError, RecoveryStrategy}; -pub use nonce_manager::{EoaHealth, NonceAssignment, NonceManager}; -pub use send::{EoaSendError, EoaSendHandler, EoaSendJobData, EoaSendResult}; -pub use transaction_store::{ - ActiveAttempt, ConfirmationData, TransactionData, TransactionStore, TransactionStoreError, -}; diff --git a/executors/src/eoa/nonce_manager.rs b/executors/src/eoa/nonce_manager.rs deleted file mode 100644 index 5592f97..0000000 --- a/executors/src/eoa/nonce_manager.rs +++ /dev/null @@ -1,550 +0,0 @@ -use alloy::primitives::{Address, B256, U256}; -use engine_core::error::EngineError; -use serde::{Deserialize, Serialize}; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use thiserror::Error; -use twmq::redis::{AsyncCommands, Pipeline, aio::ConnectionManager}; - -#[derive(Debug, Error)] -pub enum NonceManagerError { - #[error("Redis error: {0}")] - RedisError(#[from] twmq::redis::RedisError), - - #[error("Max in-flight transactions reached for EOA {eoa}: {current}/{max}")] - MaxInFlightReached { - eoa: Address, - current: u32, - max: u32, - }, - - #[error("EOA {eoa} needs sync - no optimistic nonce found")] - NeedsSync { eoa: Address }, - - #[error("Nonce assignment failed: {reason}")] - NonceAssignmentFailed { reason: String }, -} - -impl From for EngineError { - fn from(err: NonceManagerError) -> Self { - EngineError::InternalError { - message: err.to_string(), - } - } -} - -/// Tracks nonce assignment for a specific transaction -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct NonceAssignment { - pub transaction_id: String, - pub transaction_hash: B256, - pub assigned_at: u64, -} - -/// Health tracking for an EOA on a specific chain -#[derive(Serialize, 
Deserialize, Debug, Clone)] -pub struct EoaHealth { - pub in_flight_count: u32, - pub consecutive_errors: u32, - pub last_error_time: u64, - pub last_success_time: u64, - pub recycled_nonce_count: u32, - pub last_sync_time: u64, - pub is_synced: bool, -} - -impl Default for EoaHealth { - fn default() -> Self { - Self { - in_flight_count: 0, - consecutive_errors: 0, - last_error_time: 0, - last_success_time: 0, - recycled_nonce_count: 0, - last_sync_time: 0, - is_synced: false, - } - } -} - -/// Manages nonce assignment and recycling for EOA transactions -pub struct NonceManager { - redis: ConnectionManager, - namespace: Option, - max_in_flight: u32, - max_recycled: u32, -} - -impl NonceManager { - pub fn new( - redis: ConnectionManager, - namespace: Option, - max_in_flight: u32, - max_recycled: u32, - ) -> Self { - Self { - redis, - namespace, - max_in_flight, - max_recycled, - } - } - - // Redis key naming methods with proper EOA namespacing - fn eoa_key(&self, eoa: Address, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa:{}:{}", ns, chain_id, eoa), - None => format!("eoa:{}:{}", chain_id, eoa), - } - } - - fn optimistic_nonce_key(&self, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_nonce:optimistic:{}", ns, chain_id), - None => format!("eoa_nonce:optimistic:{}", chain_id), - } - } - - fn recycled_nonces_key(&self, eoa: Address, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_nonce:recycled:{}:{}", ns, chain_id, eoa), - None => format!("eoa_nonce:recycled:{}:{}", chain_id, eoa), - } - } - - fn nonce_assignments_key(&self, eoa: Address, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_nonce:assigned:{}:{}", ns, chain_id, eoa), - None => format!("eoa_nonce:assigned:{}:{}", chain_id, eoa), - } - } - - fn onchain_nonce_cache_key(&self, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_nonce:onchain:{}", ns, chain_id), - None => format!("eoa_nonce:onchain:{}", chain_id), - } - } - - fn health_status_key(&self, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_health:{}", ns, chain_id), - None => format!("eoa_health:{}", chain_id), - } - } - - fn epoch_key(&self, eoa: Address, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_epoch:{}:{}", ns, chain_id, eoa), - None => format!("eoa_epoch:{}:{}", chain_id, eoa), - } - } - - fn sync_lock_key(&self, eoa: Address, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_sync_lock:{}:{}", ns, chain_id, eoa), - None => format!("eoa_sync_lock:{}:{}", chain_id, eoa), - } - } - - /// Get current health status for an EOA - pub async fn get_eoa_health( - &self, - eoa: Address, - chain_id: u64, - ) -> Result { - let mut conn = self.redis.clone(); - let health_key = self.health_status_key(chain_id); - let eoa_field = eoa.to_string(); - - let health_json: Option = conn.hget(&health_key, &eoa_field).await?; - - match health_json { - Some(json) => Ok(serde_json::from_str(&json).unwrap_or_default()), - None => Ok(EoaHealth::default()), - } - } - - /// Update health status for an EOA - pub async fn update_eoa_health( - &self, - eoa: Address, - chain_id: u64, - health: &EoaHealth, - ) -> Result<(), NonceManagerError> { - let mut conn = self.redis.clone(); - let health_key = self.health_status_key(chain_id); - let eoa_field = eoa.to_string(); - let health_json = serde_json::to_string(health).unwrap(); - 
- let _: () = conn.hset(&health_key, &eoa_field, health_json).await?; - Ok(()) - } - - /// Atomic nonce assignment using cached onchain nonce with epoch-based recycling protection - pub async fn assign_nonce( - &self, - eoa: Address, - chain_id: u64, - ) -> Result<(u64, u64), NonceManagerError> { - let script = twmq::redis::Script::new( - r#" - local eoa = ARGV[1] - local max_recycled = tonumber(ARGV[2]) - local max_in_flight = tonumber(ARGV[3]) - local now = tonumber(ARGV[4]) - - local optimistic_nonce_key = KEYS[1] - local recycled_nonces_key = KEYS[2] - local health_key = KEYS[3] - local epoch_key = KEYS[4] - local onchain_cache_key = KEYS[5] - - -- Get current epoch (or initialize) - local current_epoch = redis.call('GET', epoch_key) - if not current_epoch then - current_epoch = tostring(now) - redis.call('SET', epoch_key, current_epoch) - end - - -- Derive recycled count - local recycled_count = redis.call('ZCARD', recycled_nonces_key) - - -- Get optimistic nonce - local optimistic_nonce = redis.call('HGET', optimistic_nonce_key, eoa) - if not optimistic_nonce then - -- Not initialized, need sync - return {-3, "needs_sync", "0", current_epoch} - end - optimistic_nonce = tonumber(optimistic_nonce) - - -- Get cached onchain nonce - local onchain_nonce = redis.call('HGET', onchain_cache_key, eoa) - if not onchain_nonce then - -- No cached onchain nonce, need sync - return {-3, "needs_sync", "0", current_epoch} - end - onchain_nonce = tonumber(onchain_nonce) - - -- Derive in-flight count - local in_flight_count = math.max(0, optimistic_nonce - onchain_nonce) - - -- Check if recycled count exceeds threshold - if recycled_count > max_recycled then - -- Force reset: increment epoch, clear recycled nonces, trigger resync - local reset_epoch = tostring(now) - - -- Update epoch (this invalidates any stale recycling attempts) - redis.call('SET', epoch_key, reset_epoch) - - -- Clear all recycled nonces - redis.call('DEL', recycled_nonces_key) - - -- Clear optimistic nonce to force resync - redis.call('HDEL', optimistic_nonce_key, eoa) - - -- Clear cached onchain nonce to force fresh fetch - redis.call('HDEL', onchain_cache_key, eoa) - - -- Update health to indicate reset occurred - local health_json = redis.call('HGET', health_key, eoa) - local health = {} - if health_json then - health = cjson.decode(health_json) - end - health.last_sync_time = now - health.is_synced = false - redis.call('HSET', health_key, eoa, cjson.encode(health)) - - return {-1, "too_many_recycled_reset", "0", reset_epoch} - end - - -- Check in-flight threshold - if in_flight_count >= max_in_flight then - return {-2, "max_in_flight", tostring(in_flight_count), current_epoch} - end - - -- Try to pop the lowest recycled nonce first - if recycled_count > 0 then - local recycled_nonce = redis.call('ZPOPMIN', recycled_nonces_key) - if #recycled_nonce > 0 then - -- Update health with successful assignment - local health_json = redis.call('HGET', health_key, eoa) - local health = {} - if health_json then - health = cjson.decode(health_json) - end - health.last_success_time = now - redis.call('HSET', health_key, eoa, cjson.encode(health)) - - return {0, recycled_nonce[1], current_epoch} - end - end - - -- No recycled nonce, increment optimistic nonce - local nonce = optimistic_nonce - redis.call('HSET', optimistic_nonce_key, eoa, nonce + 1) - - -- Update health with successful assignment - local health_json = redis.call('HGET', health_key, eoa) - local health = {} - if health_json then - health = cjson.decode(health_json) - end - 
health.last_success_time = now - redis.call('HSET', health_key, eoa, cjson.encode(health)) - - return {1, tostring(nonce), current_epoch} - "#, - ); - - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(); - - let result: (i32, String, String, String) = script - .key(self.optimistic_nonce_key(chain_id)) - .key(self.recycled_nonces_key(eoa, chain_id)) - .key(self.health_status_key(chain_id)) - .key(self.epoch_key(eoa, chain_id)) - .key(self.onchain_nonce_cache_key(chain_id)) - .arg(eoa.to_string()) - .arg(self.max_recycled) - .arg(self.max_in_flight) - .arg(now) - .invoke_async(&mut self.redis.clone()) - .await?; - - match result.0 { - -1 => { - // Reset occurred due to too many recycled nonces - force resync needed - Err(NonceManagerError::NeedsSync { eoa }) - } - -2 => Err(NonceManagerError::MaxInFlightReached { - eoa, - current: result.2.parse().unwrap_or(0), - max: self.max_in_flight, - }), - -3 => Err(NonceManagerError::NeedsSync { eoa }), - 0 | 1 => { - let nonce: u64 = - result - .1 - .parse() - .map_err(|e| NonceManagerError::NonceAssignmentFailed { - reason: format!("Failed to parse nonce: {}", e), - })?; - let epoch: u64 = - result - .3 - .parse() - .map_err(|e| NonceManagerError::NonceAssignmentFailed { - reason: format!("Failed to parse epoch: {}", e), - })?; - Ok((nonce, epoch)) - } - _ => Err(NonceManagerError::NonceAssignmentFailed { - reason: "Unexpected result from nonce assignment".to_string(), - }), - } - } - - /// Record a nonce assignment for tracking - pub fn add_nonce_assignment_command( - &self, - pipeline: &mut Pipeline, - eoa: Address, - chain_id: u64, - nonce: U256, - transaction_id: &str, - transaction_hash: B256, - ) { - let assignment = NonceAssignment { - transaction_id: transaction_id.to_string(), - transaction_hash, - assigned_at: SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(), - }; - - let assignment_json = serde_json::to_string(&assignment).unwrap(); - let assignments_key = self.nonce_assignments_key(eoa, chain_id); - - pipeline.hset(&assignments_key, nonce.to_string(), assignment_json); - } - - /// Recycle a nonce back to the recycled set - pub fn add_recycle_nonce_command( - &self, - pipeline: &mut Pipeline, - eoa: Address, - chain_id: u64, - nonce: U256, - ) { - let recycled_key = self.recycled_nonces_key(eoa, chain_id); - let health_key = self.health_status_key(chain_id); - - // Add to recycled sorted set (score = nonce value for ordering) - pipeline.zadd(&recycled_key, nonce.to_string(), nonce.to::()); - - // Update health to increment recycled count - pipeline.hincr(&health_key, format!("{}:recycled_nonce_count", eoa), 1); - } - - /// Remove a nonce assignment after successful confirmation - pub fn add_remove_assignment_command( - &self, - pipeline: &mut Pipeline, - eoa: Address, - chain_id: u64, - nonce: U256, - ) { - let assignments_key = self.nonce_assignments_key(eoa, chain_id); - let health_key = self.health_status_key(chain_id); - - pipeline.hdel(&assignments_key, nonce.to_string()); - - // Decrement in-flight count - pipeline.hincr(&health_key, format!("{}:in_flight_count", eoa), -1); - } - - /// Get all nonce assignments for an EOA - pub async fn get_nonce_assignments( - &self, - eoa: Address, - chain_id: u64, - nonce: U256, - ) -> Result, NonceManagerError> { - let mut conn = self.redis.clone(); - let assignments_key = self.nonce_assignments_key(eoa, chain_id); - - let assignment_json: Option = - conn.hget(&assignments_key, nonce.to_string()).await?; - - match assignment_json { - 
Some(json) => { - let assignment: NonceAssignment = serde_json::from_str(&json).map_err(|e| { - NonceManagerError::NonceAssignmentFailed { - reason: format!("Failed to deserialize assignment: {}", e), - } - })?; - Ok(vec![assignment]) - } - None => Ok(vec![]), - } - } - - /// Attempt to acquire sync lock and sync nonce for an EOA - pub async fn try_sync_nonce( - &self, - eoa: Address, - chain_id: u64, - onchain_nonce: u64, - ) -> Result { - let script = twmq::redis::Script::new( - r#" - local eoa = ARGV[1] - local onchain_nonce = tonumber(ARGV[2]) - local now = tonumber(ARGV[3]) - - local optimistic_nonce_key = KEYS[1] - local recycled_nonces_key = KEYS[2] - local health_key = KEYS[3] - local onchain_cache_key = KEYS[4] - local sync_lock_key = KEYS[5] - local epoch_key = KEYS[6] - - -- Try to acquire sync lock (60 second expiry) - local lock_acquired = redis.call('SET', sync_lock_key, now, 'NX', 'EX', '60') - if not lock_acquired then - -- Another process is syncing - return {0, "sync_in_progress"} - end - - -- Successfully acquired lock, perform sync - -- Clear recycled nonces and reset optimistic nonce - redis.call('DEL', recycled_nonces_key) - redis.call('HSET', optimistic_nonce_key, eoa, onchain_nonce) - redis.call('HSET', onchain_cache_key, eoa, onchain_nonce) - - -- Update epoch to invalidate any stale recycling attempts - local new_epoch = tostring(now) - redis.call('SET', epoch_key, new_epoch) - - -- Update health status - local health_json = redis.call('HGET', health_key, eoa) - local health = {} - if health_json then - health = cjson.decode(health_json) - end - health.is_synced = true - health.last_sync_time = now - health.consecutive_errors = 0 - redis.call('HSET', health_key, eoa, cjson.encode(health)) - - return {1, "synced", new_epoch} - "#, - ); - - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(); - - let result: (i32, String, Option) = script - .key(self.optimistic_nonce_key(chain_id)) - .key(self.recycled_nonces_key(eoa, chain_id)) - .key(self.health_status_key(chain_id)) - .key(self.onchain_nonce_cache_key(chain_id)) - .key(self.sync_lock_key(eoa, chain_id)) - .key(self.epoch_key(eoa, chain_id)) - .arg(eoa.to_string()) - .arg(onchain_nonce) - .arg(now) - .invoke_async(&mut self.redis.clone()) - .await?; - - Ok(result.0 == 1) - } - - /// Get cached onchain nonce - pub async fn get_cached_onchain_nonce( - &self, - eoa: Address, - chain_id: u64, - ) -> Result, NonceManagerError> { - let mut conn = self.redis.clone(); - let cache_key = self.onchain_nonce_cache_key(chain_id); - - let nonce_str: Option = conn.hget(&cache_key, eoa.to_string()).await?; - - match nonce_str { - Some(s) => { - let nonce = - s.parse::() - .map_err(|e| NonceManagerError::NonceAssignmentFailed { - reason: format!("Failed to parse cached nonce: {}", e), - })?; - Ok(Some(nonce)) - } - None => Ok(None), - } - } - - /// Update cached onchain nonce - pub async fn update_cached_onchain_nonce( - &self, - eoa: Address, - chain_id: u64, - nonce: u64, - ) -> Result<(), NonceManagerError> { - let mut conn = self.redis.clone(); - let cache_key = self.onchain_nonce_cache_key(chain_id); - - let _: () = conn - .hset(&cache_key, eoa.to_string(), nonce.to_string()) - .await?; - Ok(()) - } -} diff --git a/executors/src/eoa/send.rs b/executors/src/eoa/send.rs deleted file mode 100644 index 6aa6c2a..0000000 --- a/executors/src/eoa/send.rs +++ /dev/null @@ -1,805 +0,0 @@ -use alloy::consensus::{SignableTransaction, Transaction}; -use alloy::network::TransactionBuilder; -use 
alloy::primitives::{Address, B256, Bytes, U256}; -use alloy::providers::Provider; -use alloy::rpc::types::TransactionRequest as AlloyTransactionRequest; -use alloy::transports::{RpcError, TransportErrorKind}; -use engine_core::credentials::SigningCredential; -use engine_core::{ - chain::{Chain, ChainService, RpcCredentials}, - error::{AlloyRpcErrorToEngineError, EngineError}, - execution_options::WebhookOptions, - signer::{AccountSigner, EoaSigner, EoaSigningOptions}, -}; -use serde::{Deserialize, Serialize}; -use serde_json::json; -use std::{sync::Arc, time::Duration}; -use twmq::{ - FailHookData, NackHookData, Queue, SuccessHookData, UserCancellable, - error::TwmqError, - hooks::TransactionContext, - job::{BorrowedJob, DelayOptions, JobResult, RequeuePosition, ToJobError, ToJobResult}, -}; - -use crate::eoa::{EoaConfirmationWorker, EoaConfirmationWorkerJobData}; -use crate::{ - transaction_registry::TransactionRegistry, - webhook::{ - WebhookJobHandler, - envelope::{ExecutorStage, HasTransactionMetadata, HasWebhookOptions, WebhookCapable}, - }, -}; - -use super::{ - error_classifier::{EoaErrorMapper, EoaExecutionError, RecoveryStrategy}, - nonce_manager::NonceManager, -}; - -// --- Job Payload --- -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaSendJobData { - pub transaction_id: String, - pub chain_id: u64, - pub from: Address, - pub to: Option
, - pub value: U256, - pub data: Bytes, - pub webhook_options: Option>, - - pub assigned_nonce: Option, - pub gas_limit: Option, - - pub signing_credential: SigningCredential, - pub rpc_credentials: RpcCredentials, -} - -impl HasWebhookOptions for EoaSendJobData { - fn webhook_options(&self) -> Option> { - self.webhook_options.clone() - } -} - -impl HasTransactionMetadata for EoaSendJobData { - fn transaction_id(&self) -> String { - self.transaction_id.clone() - } -} - -// --- Success Result --- -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaSendResult { - pub transaction_hash: B256, - pub nonce_used: u64, - pub possibly_duplicate: Option, -} - -// --- Error Types --- -#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] -pub enum EoaSendError { - #[error("Chain service error for chainId {chain_id}: {message}")] - ChainServiceError { chain_id: u64, message: String }, - - #[error("Transaction simulation failed: {message}")] - SimulationFailedWithRevert { - message: String, - revert_reason: Option, - revert_data: Option, - }, - - #[error("Transaction simulation failed: {message}")] - SimulationFailed { - message: String, - inner_error: EngineError, - }, - - #[error("Gas estimation failed: {message}")] - GasEstimationFailed { message: String }, - - #[error("Fee estimation failed: {message}")] - FeeEstimationFailed { - message: String, - inner_error: EngineError, - }, - - #[error("Nonce assignment failed: {reason}")] - NonceAssignmentFailed { reason: String }, - - #[error("Max in-flight transactions reached for EOA {eoa}: {current}/{max}")] - MaxInFlightReached { - eoa: Address, - current: u32, - max: u32, - }, - - #[error("Transaction send failed: {message}")] - SendFailed { - nonce_used: U256, - message: String, - possibly_sent: bool, // true for "nonce too low" errors - should_retry: bool, - }, - - #[error("EOA health check failed: {reason}")] - UnhealthyEoa { - eoa: Address, - reason: String, - should_resync: bool, - }, - - #[error("Invalid RPC Credentials: {message}")] - InvalidRpcCredentials { message: String }, - - #[error("Internal error: {message}")] - InternalError { message: String }, - - #[error("Transaction cancelled by user")] - UserCancelled, -} - -impl From for EoaSendError { - fn from(error: TwmqError) -> Self { - EoaSendError::InternalError { - message: format!("Queue error: {}", error), - } - } -} - -impl UserCancellable for EoaSendError { - fn user_cancelled() -> Self { - EoaSendError::UserCancelled - } -} - -impl EoaSendError { - /// Returns true if the nonce might have been consumed (used) by this error - pub fn possibly_sent(&self) -> bool { - match self { - EoaSendError::SendFailed { possibly_sent, .. 
} => *possibly_sent, - _ => false, - } - } -} - -// --- Handler --- -pub struct EoaSendHandler -where - CS: ChainService + Send + Sync + 'static, -{ - pub chain_service: Arc, - pub nonce_manager: Arc, - pub webhook_queue: Arc>, - pub transaction_registry: Arc, - pub eoa_signer: Arc, - pub max_in_flight: u32, - pub confirm_queue: Arc>>, -} - -impl ExecutorStage for EoaSendHandler -where - CS: ChainService + Send + Sync + 'static, -{ - fn executor_name() -> &'static str { - "eoa" - } - - fn stage_name() -> &'static str { - "send" - } -} - -impl WebhookCapable for EoaSendHandler -where - CS: ChainService + Send + Sync + 'static, -{ - fn webhook_queue(&self) -> &Arc> { - &self.webhook_queue - } -} - -impl twmq::DurableExecution for EoaSendHandler -where - CS: ChainService + Send + Sync + 'static, -{ - type Output = EoaSendResult; - type ErrorData = EoaSendError; - type JobData = EoaSendJobData; - - #[tracing::instrument(skip(self, job), fields(transaction_id = job.job.id, stage = Self::stage_name(), executor = Self::executor_name()))] - async fn process( - &self, - job: &BorrowedJob, - ) -> JobResult { - let job_data = &job.job.data; - - // 1. Get Chain - let chain = self - .chain_service - .get_chain(job_data.chain_id) - .map_err(|e| EoaSendError::ChainServiceError { - chain_id: job_data.chain_id, - message: format!("Failed to get chain instance: {}", e), - }) - .map_err_fail()?; - - let chain_auth_headers = job_data - .rpc_credentials - .to_header_map() - .map_err(|e| EoaSendError::InvalidRpcCredentials { - message: e.to_string(), - }) - .map_err_fail()?; - - let chain = chain.with_new_default_headers(chain_auth_headers); - - // 2. Build base transaction request - let mut tx_request = AlloyTransactionRequest::default() - .with_from(job_data.from) - .with_value(job_data.value) - .with_input(job_data.data.clone()) - .with_chain_id(job_data.chain_id); - - if let Some(to) = job_data.to { - tx_request = tx_request.with_to(to); - } - - tx_request = self - .estimate_gas_fees(&chain, tx_request) - .await - .map_err_fail()?; - - // 4. Estimate gas limit if not provided (this also simulates) - if let Some(gas_limit) = job_data.gas_limit { - tx_request = tx_request.with_gas_limit(gas_limit); - } else { - let gas_limit_result = chain.provider().estimate_gas(tx_request.clone()).await; - match gas_limit_result { - Ok(gas_limit) => { - let gas_with_buffer = gas_limit * 110 / 100; // Add 10% buffer - tx_request = tx_request.with_gas_limit(gas_with_buffer); - } - Err(e) => { - return self.handle_simulation_error(e, &chain); - } - } - } - - // 5. Assign nonce (atomic operation with sync fallback) - let assigned_nonce = if let Some(nonce) = job_data.assigned_nonce { - // Retry with previously assigned nonce - nonce - } else { - // First attempt - assign new nonce - match self - .nonce_manager - .assign_nonce(job_data.from, job_data.chain_id) - .await - { - Ok((nonce, _epoch)) => nonce, - Err(super::nonce_manager::NonceManagerError::NeedsSync { .. }) => { - // Need to sync - try to acquire sync lock and sync - return self.handle_sync_required(job_data, &chain).await; - } - Err(e) => { - return self.handle_nonce_assignment_error(e).await; - } - } - }; - - // 6. 
Apply nonce to final transaction - let final_tx = tx_request - .with_nonce(assigned_nonce) - .build_typed_tx() - .map_err(|e| EoaSendError::InternalError { - message: format!("Failed to build typed transaction: {}", json!(e)), - }) - .map_err_fail()?; - - let gas_limit = final_tx.gas_limit(); - let max_fee_per_gas = final_tx.max_fee_per_gas(); - let max_priority_fee_per_gas = final_tx.max_priority_fee_per_gas(); - - // 7. Sign the transaction - let signing_options = EoaSigningOptions { - from: job_data.from, - chain_id: Some(job_data.chain_id), - }; - - let signature = self - .eoa_signer - .sign_transaction( - signing_options, - final_tx.clone(), - job_data.signing_credential.clone(), - ) - .await - .map_err(|e| EoaSendError::InternalError { - message: format!("Failed to sign transaction: {}", e), - }) - .map_err_fail()?; - - // 8. Create signed transaction envelope - let signed_tx = final_tx.into_signed( - signature - .parse() - .map_err(|e| EoaSendError::InternalError { - message: format!("Failed to parse signature: {}", e), - }) - .map_err_fail()?, - ); - - tracing::debug!( - nonce = %assigned_nonce, - gas_limit = %gas_limit, - max_fee_per_gas = %max_fee_per_gas, - max_priority_fee_per_gas = ?max_priority_fee_per_gas, - "Sending signed transaction" - ); - - let pre_computed_hash = signed_tx.hash(); - - // 9. Send transaction - match chain.provider().send_tx_envelope(signed_tx.into()).await { - Ok(pending_tx) => { - let tx_hash = *pending_tx.tx_hash(); - - tracing::info!( - transaction_hash = %tx_hash, - nonce = %assigned_nonce, - "Transaction sent successfully" - ); - - Ok(EoaSendResult { - transaction_hash: tx_hash, - nonce_used: assigned_nonce, - possibly_duplicate: None, - }) - } - Err(send_error) => { - self.handle_send_error(send_error, assigned_nonce, &chain) - .await - } - } - } - - async fn on_success( - &self, - job: &BorrowedJob, - success_data: SuccessHookData<'_, Self::Output>, - tx: &mut TransactionContext<'_>, - ) { - // 1. Record nonce assignment for tracking - self.nonce_manager.add_nonce_assignment_command( - tx.pipeline(), - job.job.data.from, - job.job.data.chain_id, - success_data.result.nonce_used, - &job.job.data.transaction_id, - success_data.result.transaction_hash, - ); - - // 2. Update transaction registry: move from send to confirm queue - self.transaction_registry.add_set_command( - tx.pipeline(), - &job.job.data.transaction_id, - "eoa_confirm", - ); - - // 3. Queue confirmation job - let confirm_job = self - .confirm_queue - .clone() - .job(EoaConfirmationWorkerJobData { - chain_id: job.job.data.chain_id, - eoa: job.job.data.from, - }) - .with_id(&job.job.data.transaction_id) - .with_delay(DelayOptions { - delay: Duration::from_secs(2), - position: RequeuePosition::Last, - }); - - if let Err(e) = tx.queue_job(confirm_job) { - tracing::error!( - transaction_id = %job.job.data.transaction_id, - error = %e, - "Failed to queue confirmation job" - ); - } - - if let Err(e) = self.queue_success_webhook(job, success_data, tx) { - tracing::error!( - transaction_id = %job.job.data.transaction_id, - error = %e, - "Failed to queue success webhook" - ); - } - } - - async fn on_nack( - &self, - job: &BorrowedJob, - nack_data: NackHookData<'_, Self::ErrorData>, - tx: &mut TransactionContext<'_>, - ) { - // Update health on nack (increment error count) - if matches!( - nack_data.error, - EoaSendError::SimulationFailed { .. } - | EoaSendError::SendFailed { .. } - | EoaSendError::UnhealthyEoa { .. 
} - ) { - // TODO: Update health error counters - } - - if let Err(e) = self.queue_nack_webhook(job, nack_data, tx) { - tracing::error!( - transaction_id = %job.job.data.transaction_id, - error = %e, - "Failed to queue nack webhook" - ); - } - } - - async fn on_fail( - &self, - job: &BorrowedJob, - fail_data: FailHookData<'_, Self::ErrorData>, - tx: &mut TransactionContext<'_>, - ) { - // Handle nonce recycling based on error type - if let Some(nonce) = job.job.data.assigned_nonce { - let should_recycle = match fail_data.error { - EoaSendError::SendFailed { possibly_sent, .. } => !possibly_sent, - EoaSendError::SimulationFailed { .. } => true, - EoaSendError::GasEstimationFailed { .. } => true, - EoaSendError::NonceAssignmentFailed { .. } => false, // Nonce wasn't assigned - EoaSendError::MaxInFlightReached { .. } => false, // Nonce wasn't assigned - EoaSendError::UnhealthyEoa { should_resync, .. } => *should_resync, - _ => false, - }; - - if should_recycle { - tracing::debug!( - nonce = %nonce, - transaction_id = %job.job.data.transaction_id, - "Recycling nonce after permanent failure" - ); - - self.nonce_manager.add_recycle_nonce_command( - tx.pipeline(), - job.job.data.from, - job.job.data.chain_id, - nonce, - ); - } - } - - // Handle sync triggering for health issues - if let EoaSendError::UnhealthyEoa { - should_resync: true, - .. - } = fail_data.error - { - // TODO: Trigger sync operation - tracing::warn!( - eoa = %job.job.data.from, - chain_id = job.job.data.chain_id, - "EOA health issue detected - sync recommended" - ); - } - - // Remove transaction from registry since it failed permanently - self.transaction_registry - .add_remove_command(tx.pipeline(), &job.job.data.transaction_id); - - if let Err(e) = self.queue_fail_webhook(job, fail_data, tx) { - tracing::error!( - transaction_id = %job.job.data.transaction_id, - error = %e, - "Failed to queue fail webhook" - ); - } - } -} - -// --- Error Handling --- - -impl EoaSendHandler -where - CS: ChainService + Send + Sync + 'static, -{ - fn handle_simulation_error( - &self, - rpc_error: RpcError, - chain: &impl Chain, - ) -> JobResult { - // Check if this is a revert first - if let RpcError::ErrorResp(error_payload) = &rpc_error { - if let Some(revert_data) = error_payload.as_revert_data() { - return Err(EoaSendError::SimulationFailedWithRevert { - message: format!( - "Transaction reverted during simulation: {}", - error_payload.message - ), - revert_reason: Some(error_payload.message.to_string()), - revert_data: Some(revert_data), - } - .fail()); - } - } - - // Try to map actionable errors - match EoaErrorMapper::map_send_error(&rpc_error, chain) { - Ok(eoa_error) => { - let strategy = EoaErrorMapper::get_recovery_strategy(&eoa_error); - eoa_error.to_send_job_result( - &strategy, - || { - // This shouldn't happen for simulation errors, but handle gracefully - EoaSendResult { - transaction_hash: B256::ZERO, - nonce_used: U256::ZERO, - possibly_duplicate: None, - } - }, - |reason| EoaSendError::SimulationFailed { - message: reason.clone(), - }, - ) - } - Err(engine_error) => { - // Use existing engine error handling - Err(EoaSendError::SimulationFailed { - message: engine_error.to_string(), - revert_reason: None, - revert_data: None, - } - .fail()) - } - } - } - - async fn handle_sync_required( - &self, - job_data: &EoaSendJobData, - chain: &impl Chain, - ) -> JobResult { - tracing::info!( - eoa = %job_data.from, - chain_id = job_data.chain_id, - "Sync required - attempting to sync nonce" - ); - - // Get current onchain nonce - let 
onchain_nonce = match chain.provider().get_transaction_count(job_data.from).await { - Ok(nonce) => nonce.to::(), - Err(e) => { - // Try to map actionable errors, otherwise use engine error handling - match EoaErrorMapper::map_send_error(&e, chain) { - Ok(eoa_error) => { - let strategy = EoaErrorMapper::get_recovery_strategy(&eoa_error); - return eoa_error.to_send_job_result( - &strategy, - || EoaSendResult { - transaction_hash: B256::ZERO, - nonce_used: U256::ZERO, - gas_limit: U256::ZERO, - max_fee_per_gas: U256::ZERO, - max_priority_fee_per_gas: U256::ZERO, - possibly_duplicate: None, - }, - |reason| EoaSendError::ChainServiceError { - chain_id: job_data.chain_id, - message: format!( - "Failed to get onchain nonce for sync: {}", - reason - ), - }, - ); - } - Err(engine_error) => { - // Use existing engine error handling - return Err(EoaSendError::ChainServiceError { - chain_id: job_data.chain_id, - message: format!( - "Failed to get onchain nonce for sync: {}", - engine_error - ), - } - .fail()); - } - } - } - }; - - // Try to acquire sync lock and perform sync - match self - .nonce_manager - .try_sync_nonce(job_data.from, job_data.chain_id, onchain_nonce) - .await - { - Ok(true) => { - // Successfully synced - tracing::info!( - eoa = %job_data.from, - chain_id = job_data.chain_id, - onchain_nonce = %onchain_nonce, - "Successfully synced nonce" - ); - - // Delay and retry the job - Err(EoaSendError::NonceAssignmentFailed { - reason: "Nonce synced - retrying".to_string(), - } - .nack(Some(Duration::from_millis(100)), RequeuePosition::Last)) - } - Ok(false) => { - // Another process is syncing - tracing::debug!( - eoa = %job_data.from, - chain_id = job_data.chain_id, - "Another process is syncing - backing off" - ); - - Err(EoaSendError::NonceAssignmentFailed { - reason: "Sync in progress - backing off".to_string(), - } - .nack(Some(Duration::from_secs(2)), RequeuePosition::Last)) - } - Err(e) => { - tracing::error!( - eoa = %job_data.from, - chain_id = job_data.chain_id, - error = %e, - "Failed to sync nonce" - ); - - Err(EoaSendError::NonceAssignmentFailed { - reason: format!("Sync failed: {}", e), - } - .nack(Some(Duration::from_secs(10)), RequeuePosition::Last)) - } - } - } - - async fn handle_nonce_assignment_error( - &self, - nonce_error: super::nonce_manager::NonceManagerError, - ) -> JobResult { - use super::nonce_manager::NonceManagerError; - - match nonce_error { - NonceManagerError::MaxInFlightReached { eoa, current, max } => { - Err(EoaSendError::MaxInFlightReached { eoa, current, max } - .nack(Some(Duration::from_secs(10)), RequeuePosition::Last)) - } - NonceManagerError::NeedsSync { eoa } => { - // This shouldn't happen since we handle it above, but just in case - Err(EoaSendError::NonceAssignmentFailed { - reason: format!("Unexpected needs sync for EOA {}", eoa), - } - .nack(Some(Duration::from_secs(30)), RequeuePosition::Last)) - } - _ => Err(EoaSendError::NonceAssignmentFailed { - reason: nonce_error.to_string(), - } - .fail()), - } - } - - async fn handle_send_error( - &self, - send_error: RpcError, - nonce: u64, - chain: &impl Chain, - ) -> JobResult { - // Try to map actionable errors, otherwise use engine error handling - match EoaErrorMapper::map_send_error(&send_error, chain) { - Ok(eoa_error) => { - let strategy = EoaErrorMapper::get_recovery_strategy(&eoa_error); - - tracing::debug!( - nonce = %nonce, - error = ?eoa_error, - strategy = ?strategy, - "Mapped send error" - ); - - if strategy.queue_confirmation { - tracing::warn!(nonce = %nonce, message = 
%eoa_error.message(), "Transaction possibly sent - treating as success"); - - Ok(EoaSendResult { - transaction_hash: B256::ZERO, // Will be resolved in confirmation - nonce_used: nonce, - gas_limit: U256::ZERO, - max_fee_per_gas: U256::ZERO, - max_priority_fee_per_gas: U256::ZERO, - possibly_duplicate: Some(true), - }) - } else { - eoa_error.to_send_job_result( - &strategy, - || EoaSendResult { - transaction_hash: B256::ZERO, - nonce_used: nonce, - gas_limit: U256::ZERO, - max_fee_per_gas: U256::ZERO, - max_priority_fee_per_gas: U256::ZERO, - possibly_duplicate: None, - }, - |reason| EoaSendError::SendFailed { - nonce_used: nonce, - message: reason, - possibly_sent: strategy.queue_confirmation, - should_retry: strategy.retryable, - }, - ) - } - } - Err(engine_error) => { - // Use existing engine error handling - not actionable - tracing::debug!( - nonce = %nonce, - engine_error = ?engine_error, - "Using engine error handling for non-actionable error" - ); - - Err(EoaSendError::SendFailed { - nonce_used: nonce, - message: engine_error.to_string(), - possibly_sent: false, - should_retry: false, - } - .fail()) - } - } - } - - async fn estimate_gas_fees( - &self, - chain: &impl Chain, - tx: AlloyTransactionRequest, - ) -> Result { - // Try EIP-1559 fees first, fall back to legacy if unsupported - match chain.provider().estimate_eip1559_fees().await { - Ok(eip1559_fees) => { - tracing::debug!( - "Using EIP-1559 fees: max_fee={}, max_priority_fee={}", - eip1559_fees.max_fee_per_gas, - eip1559_fees.max_priority_fee_per_gas - ); - Ok(tx - .with_max_fee_per_gas(eip1559_fees.max_fee_per_gas) - .with_max_priority_fee_per_gas(eip1559_fees.max_priority_fee_per_gas)) - } - Err(eip1559_error) => { - // Check if this is an "unsupported feature" error - if let RpcError::UnsupportedFeature(_) = &eip1559_error { - tracing::debug!("EIP-1559 not supported, falling back to legacy gas price"); - - // Fall back to legacy gas price - match chain.provider().get_gas_price().await { - Ok(gas_price) => { - // For legacy transactions, use the gas price - tracing::debug!("Using legacy gas price: {}", gas_price); - Ok(tx.with_gas_price(gas_price)) - } - Err(legacy_error) => Err(EoaSendError::FeeEstimationFailed { - message: format!("Failed to get legacy gas price: {}", legacy_error), - inner_error: legacy_error.to_engine_error(chain), - }), - } - } else { - // Other EIP-1559 error - Err(EoaSendError::FeeEstimationFailed { - message: format!("Failed to estimate EIP-1559 fees: {}", eip1559_error), - inner_error: eip1559_error.to_engine_error(chain), - }) - } - } - } - } -} diff --git a/executors/src/eoa/store.rs b/executors/src/eoa/store.rs new file mode 100644 index 0000000..3a555d6 --- /dev/null +++ b/executors/src/eoa/store.rs @@ -0,0 +1,1792 @@ +use alloy::consensus::{Signed, TypedTransaction}; +use alloy::eips::eip7702::SignedAuthorization; +use alloy::network::AnyTransactionReceipt; +use alloy::primitives::{Address, B256, Bytes, U256}; +use engine_core::chain::RpcCredentials; +use engine_core::credentials::SigningCredential; +use engine_core::execution_options::WebhookOptions; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use twmq::redis::{AsyncCommands, Pipeline, aio::ConnectionManager}; + +/// The actual user request data +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaTransactionRequest { + pub transaction_id: String, + pub chain_id: u64, + + pub from: Address, + pub to: Option
, + pub value: U256, + pub data: Bytes, + + #[serde(alias = "gas")] + pub gas_limit: Option, + + pub webhook_options: Option>, + + pub signing_credential: SigningCredential, + pub rpc_credentials: RpcCredentials, + + #[serde(flatten)] + pub transaction_type_data: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(untagged)] +pub enum EoaTransactionTypeData { + Eip7702(EoaSend7702JobData), + Eip1559(EoaSend1559JobData), + Legacy(EoaSendLegacyJobData), +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaSend7702JobData { + pub authorization_list: Option>, + pub max_fee_per_gas: Option, + pub max_priority_fee_per_gas: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaSend1559JobData { + pub max_fee_per_gas: Option, + pub max_priority_fee_per_gas: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaSendLegacyJobData { + pub gas_price: Option, +} +/// Active attempt for a transaction (full alloy transaction + metadata) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TransactionAttempt { + pub transaction_id: String, + pub details: Signed, + pub sent_at: chrono::DateTime, + pub attempt_number: u32, +} + +/// Transaction data for a transaction_id +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TransactionData { + pub transaction_id: String, + pub user_request: EoaTransactionRequest, + pub receipt: Option, + pub attempts: Vec, +} + +pub struct BorrowedTransaction { + pub transaction_id: String, + pub data: Signed, + pub borrowed_at: chrono::DateTime, +} + +/// Transaction store focused on transaction_id operations and nonce indexing +pub struct EoaExecutorStore { + pub redis: ConnectionManager, + pub namespace: Option, +} + +impl EoaExecutorStore { + pub fn new(redis: ConnectionManager, namespace: Option) -> Self { + Self { redis, namespace } + } + + /// Name of the key for the transaction data + fn transaction_data_key_name(&self, transaction_id: &str) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:tx_data:{transaction_id}"), + None => format!("eoa_tx_data:{transaction_id}"), + } + } + + /// Name of the list for pending transactions + fn pending_transactions_list_name(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:pending_txs:{chain_id}:{eoa}"), + None => format!("eoa_executor:pending_txs:{chain_id}:{eoa}"), + } + } + + /// Name of the zset for submitted transactions. nonce -> hash + /// Same transaction might appear multiple times in the zset with different nonces/gas prices (and thus different hashes) + fn submitted_transactions_zset_name(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:submitted_txs:{chain_id}:{eoa}"), + None => format!("eoa_executor:submitted_txs:{chain_id}:{eoa}"), + } + } + + /// Name of the key that maps transaction hash to transaction id + fn transaction_hash_to_id_key_name(&self, hash: &str) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:tx_hash_to_id:{hash}"), + None => format!("eoa_executor:tx_hash_to_id:{hash}"), + } + } + + /// Name of the hashmap that maps transaction id to borrowed transactions + /// + /// This is used for crash recovery. Before submitting a transaction, we atomically move from pending to this borrowed hashmap. 
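+    ///
+    /// A minimal illustrative sketch of the flow this enables (not the actual worker code;
+    /// the `move_borrowed_to_*` steps are only named here and are defined later in this module):
+    ///
+    /// ```ignore
+    /// // 1. Atomically claim the transaction and its nonce (pending -> borrowed). If the worker
+    /// //    crashes after this point, recovery finds the entry in the borrowed hashmap and
+    /// //    rebroadcasts it.
+    /// store.atomic_move_pending_to_borrowed_with_new_nonce(
+    ///     eoa, chain_id, worker_id, &transaction_id, expected_nonce, &prepared_tx,
+    /// ).await?;
+    ///
+    /// // 2. Broadcast the signed transaction over RPC, then:
+    /// //    - sent (or "already known"): move borrowed -> submitted, recording nonce -> hash
+    /// //    - definite pre-mempool failure: move borrowed -> recycled, so the nonce is reused
+    /// ```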
+    ///
+    /// On worker recovery, if any borrowed transactions are found, we rebroadcast them and move them back to pending or submitted.
+    ///
+    /// If there's no crash, the happy path moves borrowed transactions back to pending or submitted.
+    fn borrowed_transactions_hashmap_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:borrowed_txs:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:borrowed_txs:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// Name of the set that contains recycled nonces.
+    ///
+    /// If a transaction was submitted but failed (i.e., we know with certainty it didn't enter the mempool), we add its nonce to this set.
+    ///
+    /// These nonces are used with priority, before any other nonces.
+    fn recycled_nonces_set_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:recycled_nonces:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:recycled_nonces:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// Optimistic nonce key name.
+    ///
+    /// This is used for optimistic nonce tracking.
+    ///
+    /// We store the optimistic nonce for each EOA: the nonce to assign to the next transaction we send.
+    ///
+    /// We increment this value each time a new transaction is assigned a nonce.
+    ///
+    /// !IMPORTANT! When sending a transaction, we use this nonce as the assigned nonce, NOT the incremented nonce.
+    fn optimistic_transaction_count_key_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:optimistic_nonce:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:optimistic_nonce:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// Name of the key that contains the last fetched ONCHAIN transaction count for each EOA.
+    ///
+    /// This is a cache for the actual transaction count, which is fetched from the RPC.
+    ///
+    /// The nonce for the NEXT transaction is the ONCHAIN transaction count itself (NOT count + 1).
+    ///
+    /// E.g.: the transaction count is 0, so we use nonce 0 for the next transaction. Once it lands, the transaction count will be 1.
+    fn last_transaction_count_key_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:last_tx_nonce:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:last_tx_nonce:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// EOA health key name.
+ /// + /// EOA health stores: + /// - cached balance, the timestamp of the last balance fetch + /// - timestamp of the last successful transaction confirmation + /// - timestamp of the last 5 nonce resets + fn eoa_health_key_name(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:health:{chain_id}:{eoa}"), + None => format!("eoa_executor:health:{chain_id}:{eoa}"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EoaHealth { + pub balance: U256, + pub balance_fetched_at: u64, + pub last_confirmation_at: Option, + pub nonce_resets: Vec, // Last 5 reset timestamps +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BorrowedTransactionData { + pub transaction_id: String, + pub signed_transaction: Signed, + pub hash: B256, + pub borrowed_at: u64, +} + +/// Type of nonce allocation for transaction processing +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum NonceType { + /// Nonce was recycled from a previously failed transaction + Recycled(u64), + /// Nonce was incremented from the current optimistic counter + Incremented(u64), +} + +impl NonceType { + /// Get the nonce value regardless of type + pub fn nonce(&self) -> u64 { + match self { + NonceType::Recycled(nonce) => *nonce, + NonceType::Incremented(nonce) => *nonce, + } + } + + /// Check if this is a recycled nonce + pub fn is_recycled(&self) -> bool { + matches!(self, NonceType::Recycled(_)) + } + + /// Check if this is an incremented nonce + pub fn is_incremented(&self) -> bool { + matches!(self, NonceType::Incremented(_)) + } +} + +impl EoaExecutorStore { + // ========== BOILERPLATE REDUCTION PATTERN ========== + // + // This implementation uses a helper method `execute_with_watch_and_retry` to reduce + // boilerplate in atomic Redis operations. The pattern separates: + // 1. Validation phase: async closure that checks preconditions + // 2. 
Pipeline phase: sync closure that builds Redis commands + // + // Benefits: + // - Eliminates ~80 lines of boilerplate per method + // - Centralizes retry logic, lock checking, and error handling + // - Makes individual methods focus on business logic + // - Reduces chance of bugs in WATCH/MULTI/EXEC handling + // + // See examples in: + // - atomic_move_pending_to_borrowed_with_recycled_nonce_v2() + // - atomic_move_pending_to_borrowed_with_new_nonce() + // - move_borrowed_to_submitted() + // - move_borrowed_to_recycled() + + /// Aggressively acquire EOA lock, forcefully taking over from stalled workers + pub async fn acquire_eoa_lock_aggressively( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + ) -> Result<(), TransactionStoreError> { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // First try normal acquisition + let acquired: bool = conn.set_nx(&lock_key, worker_id).await?; + if acquired { + return Ok(()); + } + // Lock exists, forcefully take it over + tracing::warn!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + "Forcefully taking over EOA lock from stalled worker" + ); + // Force set - no expiry, only released by explicit takeover + let _: () = conn.set(&lock_key, worker_id).await?; + Ok(()) + } + + /// Helper to execute atomic operations with proper retry logic and watch handling + /// + /// This helper centralizes all the boilerplate for WATCH/MULTI/EXEC operations: + /// - Retry logic with exponential backoff + /// - Lock ownership validation + /// - WATCH key management + /// - Error handling and UNWATCH cleanup + /// + /// ## When to use this helper: + /// - Simple validation that doesn't need to pass data to pipeline phase + /// - Operations that can cleanly separate validation from pipeline commands + /// - Cases where reducing boilerplate is more important than complex data flow + /// + /// ## When NOT to use this helper: + /// - Complex validation that needs to pass computed data to pipeline + /// - Operations requiring custom retry logic + /// - Cases where validation and pipeline phases are tightly coupled + /// + /// ## Example usage: + /// ``` + /// self.execute_with_watch_and_retry( + /// eoa, chain_id, worker_id, + /// &[key1, key2], // Keys to WATCH + /// "operation name", + /// async |conn| { // Validation phase + /// let data = conn.get("key").await?; + /// if !is_valid(data) { + /// return Err(SomeError); + /// } + /// Ok(()) + /// }, + /// |pipeline| { // Pipeline phase + /// pipeline.set("key", "value"); + /// pipeline.incr("counter", 1); + /// } + /// ).await + /// ``` + async fn execute_with_watch_and_retry( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + watch_keys: &[String], + operation_name: &str, + validation: V, + operation: F, + ) -> Result<(), TransactionStoreError> + where + V: AsyncFn(&mut ConnectionManager) -> Result<(), TransactionStoreError>, + F: Fn(&mut Pipeline), + { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let mut retry_count = 0; + + loop { + if retry_count >= MAX_RETRIES { + return Err(TransactionStoreError::InternalError { + message: format!( + "Exceeded max retries ({}) for {} on {}:{}", + MAX_RETRIES, operation_name, eoa, chain_id + ), + }); + } + + // Exponential backoff after first retry + if retry_count > 0 { + let delay_ms = RETRY_BASE_DELAY_MS * (1 << (retry_count - 1).min(6)); + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + tracing::debug!( + retry_count 
= retry_count, + delay_ms = delay_ms, + eoa = %eoa, + chain_id = chain_id, + operation = operation_name, + "Retrying atomic operation" + ); + } + + // WATCH all specified keys including lock + let mut watch_cmd = twmq::redis::cmd("WATCH"); + watch_cmd.arg(&lock_key); + for key in watch_keys { + watch_cmd.arg(key); + } + let _: () = watch_cmd.query_async(&mut conn).await?; + + // Check lock ownership + let current_owner: Option = conn.get(&lock_key).await?; + if current_owner.as_deref() != Some(worker_id) { + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + + // Execute validation + match validation(&mut conn).await { + Ok(()) => { + // Build and execute pipeline + let mut pipeline = twmq::redis::pipe(); + pipeline.atomic(); + operation(&mut pipeline); + + match pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => return Ok(()), // Success + Err(_) => { + // WATCH failed, check if it was our lock + let still_own_lock: Option = conn.get(&lock_key).await?; + if still_own_lock.as_deref() != Some(worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + // State changed, retry + retry_count += 1; + continue; + } + } + } + Err(e) => { + // Validation failed, unwatch and return error + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(e); + } + } + } + } + + /// Example of how to refactor a complex method using the helper to reduce boilerplate + /// This shows the pattern for atomic_move_pending_to_borrowed_with_recycled_nonce + pub async fn atomic_move_pending_to_borrowed_with_recycled_nonce( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + nonce: u64, + prepared_tx: &BorrowedTransactionData, + ) -> Result<(), TransactionStoreError> { + let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); + let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let prepared_tx_json = serde_json::to_string(prepared_tx)?; + let transaction_id = transaction_id.to_string(); + + let recyled_key_for_validation = recycled_key.clone(); + let pending_key_for_validation = pending_key.clone(); + let transaction_id_for_validation = transaction_id.clone(); + let borrowed_key_for_validation = borrowed_key.clone(); + + self.execute_with_watch_and_retry( + eoa, + chain_id, + worker_id, + &[recycled_key.clone(), pending_key.clone()], + "pending->borrowed with recycled nonce", + async move |conn: &mut ConnectionManager| { + // Validation phase - check preconditions + let nonce_score: Option = conn.zscore(recycled_key.clone(), nonce).await?; + if nonce_score.is_none() { + return Err(TransactionStoreError::NonceNotInRecycledSet { nonce }); + } + + let pending_transactions: Vec = + conn.lrange(pending_key.clone(), 0, -1).await?; + if !pending_transactions.contains(&transaction_id.clone()) { + return Err(TransactionStoreError::TransactionNotInPendingQueue { + transaction_id: transaction_id.clone(), + }); + } + + Ok(()) + }, + |pipeline: &mut Pipeline| { + pipeline.zrem(recyled_key_for_validation, nonce); + pipeline.lrem(pending_key_for_validation, 0, transaction_id_for_validation); + pipeline.hset( + borrowed_key_for_validation, + nonce.to_string(), + &prepared_tx_json, + ); + }, + ) + .await?; + + Ok(()) + } + + /// Atomically move specific 
transaction from pending to borrowed with new nonce allocation + pub async fn atomic_move_pending_to_borrowed_with_new_nonce( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + expected_nonce: u64, + prepared_tx: &BorrowedTransactionData, + ) -> Result<(), TransactionStoreError> { + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let prepared_tx_json = serde_json::to_string(prepared_tx)?; + let transaction_id = transaction_id.to_string(); + + self.execute_with_watch_and_retry( + eoa, + chain_id, + worker_id, + &[optimistic_key.clone(), pending_key.clone()], + "pending->borrowed with new nonce", + async move |conn: &mut ConnectionManager| { + // Check current optimistic nonce + let current_optimistic: Option = conn.get(optimistic_key.clone()).await?; + let current_nonce = match current_optimistic { + Some(nonce) => nonce, + None => return Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + }; + + if current_nonce != expected_nonce { + return Err(TransactionStoreError::OptimisticNonceChanged { + expected: expected_nonce, + actual: current_nonce, + }); + } + + // Check if transaction exists in pending + let pending_transactions: Vec = conn.lrange(&pending_key, 0, -1).await?; + if !pending_transactions.contains(&transaction_id.clone()) { + return Err(TransactionStoreError::TransactionNotInPendingQueue { + transaction_id: transaction_id.clone(), + }); + } + + Ok(()) + }, + move |pipeline| { + // Increment optimistic nonce + pipeline.incr(optimistic_key.clone(), 1); + // Remove transaction from pending + pipeline.lrem(pending_key.clone(), 0, transaction_id.clone()); + // Store borrowed transaction + pipeline.hset( + borrowed_key.clone(), + expected_nonce.to_string(), + &prepared_tx_json, + ); + }, + ) + .await + } + + /// Generic helper that handles WATCH + retry logic for atomic operations + /// The operation closure receives a mutable connection and should: + /// 1. Perform any validation (return early errors if needed) + /// 2. Build and execute the pipeline + /// 3. 
Return the result + pub async fn with_atomic_operation( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + watch_keys: Vec, + operation_name: &str, + operation: F, + ) -> Result + where + F: Fn(&mut ConnectionManager) -> Fut, + Fut: std::future::Future>, + { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let mut retry_count = 0; + + loop { + if retry_count >= MAX_RETRIES { + return Err(TransactionStoreError::InternalError { + message: format!( + "Exceeded max retries ({}) for {} on {}:{}", + MAX_RETRIES, operation_name, eoa, chain_id + ), + }); + } + + // Exponential backoff after first retry + if retry_count > 0 { + let delay_ms = RETRY_BASE_DELAY_MS * (1 << (retry_count - 1).min(6)); + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + tracing::debug!( + retry_count = retry_count, + delay_ms = delay_ms, + eoa = %eoa, + chain_id = chain_id, + operation = operation_name, + "Retrying atomic operation" + ); + } + + // WATCH all specified keys (lock is always included) + let mut watch_cmd = twmq::redis::cmd("WATCH"); + watch_cmd.arg(&lock_key); + for key in &watch_keys { + watch_cmd.arg(key); + } + let _: () = watch_cmd.query_async(&mut conn).await?; + + // Check if we still own the lock + let current_owner: Option = conn.get(&lock_key).await?; + match current_owner { + Some(owner) if owner == worker_id => { + // We still own it, proceed + } + _ => { + // Lost ownership - immediately fail + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + } + + // Execute operation (includes validation and pipeline execution) + match operation(&mut conn).await { + Ok(result) => return Ok(result), + Err(TransactionStoreError::LockLost { .. }) => { + // Lock was lost during operation, propagate immediately + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + Err(TransactionStoreError::WatchFailed) => { + // WATCH failed, check if it was our lock + let still_own_lock: Option = conn.get(&lock_key).await?; + if still_own_lock.as_deref() != Some(worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + // Our lock is fine, retry + retry_count += 1; + continue; + } + Err(other_error) => { + // Other errors propagate immediately (validation failures, etc.) 
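+                    // Clear the WATCH before surfacing the error so the connection is not left watching stale keys.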
+ let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(other_error); + } + } + } + } + + /// Wrapper that executes operations with lock validation using WATCH/MULTI/EXEC + pub async fn with_lock_check( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + operation: F, + ) -> Result + where + F: Fn(&mut Pipeline) -> R, + T: From, + { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let mut retry_count = 0; + + loop { + if retry_count >= MAX_RETRIES { + return Err(TransactionStoreError::InternalError { + message: format!( + "Exceeded max retries ({}) for lock check on {}:{}", + MAX_RETRIES, eoa, chain_id + ), + }); + } + + // Exponential backoff after first retry + if retry_count > 0 { + let delay_ms = RETRY_BASE_DELAY_MS * (1 << (retry_count - 1).min(6)); + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + tracing::debug!( + retry_count = retry_count, + delay_ms = delay_ms, + eoa = %eoa, + chain_id = chain_id, + "Retrying lock check operation" + ); + } + + // WATCH the EOA lock + let _: () = twmq::redis::cmd("WATCH") + .arg(&lock_key) + .query_async(&mut conn) + .await?; + + // Check if we still own the lock + let current_owner: Option = conn.get(&lock_key).await?; + match current_owner { + Some(owner) if owner == worker_id => { + // We still own it, proceed + } + _ => { + // Lost ownership - immediately fail + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + } + + // Build pipeline with operation + let mut pipeline = twmq::redis::pipe(); + pipeline.atomic(); + let result = operation(&mut pipeline); + + // Execute with WATCH protection + match pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => return Ok(T::from(result)), + Err(_) => { + // WATCH failed, check if it was our lock or someone else's + let still_own_lock: Option = conn.get(&lock_key).await?; + if still_own_lock.as_deref() != Some(worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + // Our lock is fine, someone else's WATCH failed - retry + retry_count += 1; + continue; + } + } + } + } + + // ========== ATOMIC OPERATIONS ========== + + /// Peek all borrowed transactions without removing them + pub async fn peek_borrowed_transactions( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + ) -> Result, TransactionStoreError> { + let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let borrowed_map: HashMap = conn.hgetall(&borrowed_key).await?; + let mut result = Vec::new(); + + for (nonce_str, transaction_json) in borrowed_map { + let borrowed_data: BorrowedTransactionData = serde_json::from_str(&transaction_json)?; + result.push(borrowed_data); + } + + Ok(result) + } + + /// Atomically move borrowed transaction to submitted state + /// Returns error if transaction not found in borrowed state + pub async fn move_borrowed_to_submitted( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + nonce: u64, + hash: &str, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); + let hash = hash.to_string(); + 
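+        // Owned copies of the hash and transaction id let the validation and pipeline closures below capture them.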
let transaction_id = transaction_id.to_string(); + + self.execute_with_watch_and_retry( + eoa, + chain_id, + worker_id, + &[borrowed_key.clone()], + "borrowed->submitted", + async |conn: &mut ConnectionManager| { + // Validate that borrowed transaction actually exists + let borrowed_tx: Option = + conn.hget(&borrowed_key, nonce.to_string()).await?; + if borrowed_tx.is_none() { + return Err(TransactionStoreError::TransactionNotInBorrowedState { + transaction_id: transaction_id.clone(), + nonce, + }); + } + + Ok(()) + }, + |pipeline| { + // Remove from borrowed (we know it exists) + pipeline.hdel(&borrowed_key, nonce.to_string()); + + // Add to submitted + pipeline.zadd(&submitted_key, &hash, nonce); + + // Map hash to transaction ID + pipeline.set(&hash_to_id_key, &transaction_id); + }, + ) + .await + } + + /// Atomically move borrowed transaction back to recycled nonces and pending queue + /// Returns error if transaction not found in borrowed state + pub async fn move_borrowed_to_recycled( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + nonce: u64, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); + let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let transaction_id = transaction_id.to_string(); + + self.execute_with_watch_and_retry( + eoa, + chain_id, + worker_id, + &[borrowed_key.clone()], + "borrowed->recycled", + async |conn: &mut ConnectionManager| { + // Validate that borrowed transaction actually exists + let borrowed_tx: Option = + conn.hget(&borrowed_key, nonce.to_string()).await?; + if borrowed_tx.is_none() { + return Err(TransactionStoreError::TransactionNotInBorrowedState { + transaction_id: transaction_id.clone(), + nonce, + }); + } + + Ok(()) + }, + |pipeline| { + let now = chrono::Utc::now().timestamp_millis(); + + // Remove from borrowed (we know it exists) + pipeline.hdel(&borrowed_key, nonce.to_string()); + + // Add nonce to recycled set (with timestamp as score) + pipeline.zadd(&recycled_key, nonce, now); + + // Add transaction back to pending + pipeline.lpush(&pending_key, &transaction_id); + }, + ) + .await + } + + /// Get all hashes below a certain nonce from submitted transactions + pub async fn get_hashes_below_nonce( + &self, + eoa: Address, + chain_id: u64, + below_nonce: u64, + ) -> Result, TransactionStoreError> { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // Get all entries with nonce < below_nonce + let results: Vec<(String, u64)> = conn + .zrangebyscore_withscores(&submitted_key, 0, below_nonce - 1) + .await?; + + Ok(results + .into_iter() + .map(|(hash, nonce)| (nonce, hash)) + .collect()) + } + + /// Remove all hashes for a transaction and requeue it + /// Returns error if no hashes found for this transaction in submitted state + /// NOTE: This method keeps the original boilerplate pattern because it needs to pass + /// complex data (transaction_hashes) from validation to pipeline phase. + /// The helper pattern works best for simple validation that doesn't need to pass data. 
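+    /// Concretely, the validation scan collects every submitted hash that maps to this transaction id,
+    /// and the pipeline phase must delete exactly those hashes, so the two phases share data and cannot
+    /// be expressed as the independent closures the helper expects.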
+ pub async fn fail_and_requeue_transaction( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let mut retry_count = 0; + + loop { + if retry_count >= MAX_RETRIES { + return Err(TransactionStoreError::InternalError { + message: format!( + "Exceeded max retries ({}) for fail and requeue transaction {}:{} tx:{}", + MAX_RETRIES, eoa, chain_id, transaction_id + ), + }); + } + + if retry_count > 0 { + let delay_ms = RETRY_BASE_DELAY_MS * (1 << (retry_count - 1).min(6)); + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + } + + // WATCH lock and submitted state + let _: () = twmq::redis::cmd("WATCH") + .arg(&lock_key) + .arg(&submitted_key) + .query_async(&mut conn) + .await?; + + // Check lock ownership + let current_owner: Option = conn.get(&lock_key).await?; + if current_owner.as_deref() != Some(worker_id) { + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + + // Find all hashes for this transaction that actually exist in submitted + let all_hashes: Vec = conn.zrange(&submitted_key, 0, -1).await?; + let mut transaction_hashes = Vec::new(); + + for hash in all_hashes { + if let Some(tx_id) = self.get_transaction_id_for_hash(&hash).await? { + if tx_id == transaction_id { + transaction_hashes.push(hash); + } + } + } + + if transaction_hashes.is_empty() { + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::TransactionNotInSubmittedState { + transaction_id: transaction_id.to_string(), + }); + } + + // Transaction has hashes in submitted, proceed with atomic removal and requeue + let mut pipeline = twmq::redis::pipe(); + pipeline.atomic(); + + // Remove all hashes for this transaction (we know they exist) + for hash in &transaction_hashes { + pipeline.zrem(&submitted_key, hash); + let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); + pipeline.del(&hash_to_id_key); + } + + // Add back to pending + pipeline.lpush(&pending_key, transaction_id); + + match pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => return Ok(()), // Success + Err(_) => { + // WATCH failed, check if it was our lock + let still_own_lock: Option = conn.get(&lock_key).await?; + if still_own_lock.as_deref() != Some(worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + // Submitted state changed, retry + retry_count += 1; + continue; + } + } + } + } + + /// Check EOA health (balance, etc.) 
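+    /// Returns `None` if no health snapshot has been stored for this EOA and chain yet.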
+ pub async fn check_eoa_health( + &self, + eoa: Address, + chain_id: u64, + ) -> Result, TransactionStoreError> { + let health_key = self.eoa_health_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let health_json: Option = conn.get(&health_key).await?; + if let Some(json) = health_json { + let health: EoaHealth = serde_json::from_str(&json)?; + Ok(Some(health)) + } else { + Ok(None) + } + } + + /// Update EOA health data + pub async fn update_health_data( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + health: &EoaHealth, + ) -> Result<(), TransactionStoreError> { + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let health_key = self.eoa_health_key_name(eoa, chain_id); + let health_json = serde_json::to_string(health).unwrap(); + pipeline.set(&health_key, health_json); + }) + .await + } + + /// Update cached transaction count + pub async fn update_cached_transaction_count( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_count: u64, + ) -> Result<(), TransactionStoreError> { + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); + pipeline.set(&tx_count_key, transaction_count); + }) + .await + } + + /// Peek recycled nonces without removing them + pub async fn peek_recycled_nonces( + &self, + eoa: Address, + chain_id: u64, + ) -> Result, TransactionStoreError> { + let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // Get all nonces ordered by score (timestamp) + let nonces: Vec = conn.zrange(&recycled_key, 0, -1).await?; + Ok(nonces) + } + + /// Nuke all recycled nonces + pub async fn nuke_recycled_nonces( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + ) -> Result<(), TransactionStoreError> { + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); + pipeline.del(&recycled_key); + }) + .await + } + + /// Peek at pending transactions without removing them (safe for planning) + pub async fn peek_pending_transactions( + &self, + eoa: Address, + chain_id: u64, + limit: u64, + ) -> Result, TransactionStoreError> { + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // Use LRANGE to peek without removing + let transaction_ids: Vec = + conn.lrange(&pending_key, 0, (limit as isize) - 1).await?; + Ok(transaction_ids) + } + + /// Get inflight budget (how many new transactions can be sent) + pub async fn get_inflight_budget( + &self, + eoa: Address, + chain_id: u64, + max_inflight: u64, + ) -> Result { + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let last_tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let optimistic_nonce: Option = conn.get(&optimistic_key).await?; + let last_tx_count: Option = conn.get(&last_tx_count_key).await?; + + let optimistic = match optimistic_nonce { + Some(nonce) => nonce, + None => return Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + }; + let last_count = match last_tx_count { + Some(count) => count, + None => return Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + }; + + let current_inflight = optimistic.saturating_sub(last_count); + let available_budget = max_inflight.saturating_sub(current_inflight); + + Ok(available_budget) + } + + /// Get current optimistic nonce (without 
incrementing) + pub async fn get_optimistic_nonce( + &self, + eoa: Address, + chain_id: u64, + ) -> Result { + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let current: Option = conn.get(&optimistic_key).await?; + match current { + Some(nonce) => Ok(nonce), + None => Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + } + } + + /// Complete safe transaction processing flow combining all atomic operations + /// Returns (success, used_recycled_nonce, actual_nonce) + /// + /// On specific failures (nonce not available, transaction not in pending), + /// returns success=false. On other errors, propagates the error. + pub async fn process_transaction_atomically( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + signed_tx: &Signed, + ) -> Result<(bool, bool, Option), TransactionStoreError> { + // Prepare borrowed transaction data + let borrowed_data = BorrowedTransactionData { + transaction_id: transaction_id.to_string(), + signed_transaction: signed_tx.clone(), + hash: signed_tx.hash().clone(), + borrowed_at: chrono::Utc::now().timestamp_millis() as u64, + }; + + // Try recycled nonces first + let recycled_nonces = self.peek_recycled_nonces(eoa, chain_id).await?; + if let Some(&nonce) = recycled_nonces.first() { + match self + .atomic_move_pending_to_borrowed_with_recycled_nonce( + eoa, + chain_id, + worker_id, + transaction_id, + nonce, + &borrowed_data, + ) + .await + { + Ok(()) => return Ok((true, true, Some(nonce))), // Success with recycled nonce + Err(TransactionStoreError::NonceNotInRecycledSet { .. }) => { + // Nonce was consumed by another worker, try new nonce + } + Err(TransactionStoreError::TransactionNotInPendingQueue { .. }) => { + // Transaction was processed by another worker + return Ok((false, false, None)); + } + Err(e) => return Err(e), // Other errors propagate + } + } + + // Try new nonce + let expected_nonce = self.get_optimistic_nonce(eoa, chain_id).await?; + match self + .atomic_move_pending_to_borrowed_with_new_nonce( + eoa, + chain_id, + worker_id, + transaction_id, + expected_nonce, + &borrowed_data, + ) + .await + { + Ok(()) => Ok((true, false, Some(expected_nonce))), // Success with new nonce + Err(TransactionStoreError::OptimisticNonceChanged { .. }) => { + // Nonce changed while we were processing, try again + Ok((false, false, None)) + } + Err(TransactionStoreError::TransactionNotInPendingQueue { .. 
}) => { + // Transaction was processed by another worker + Ok((false, false, None)) + } + Err(e) => Err(e), // Other errors propagate + } + } + + /// Lock key name for EOA processing + fn eoa_lock_key_name(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:lock:{chain_id}:{eoa}"), + None => format!("eoa_executor:lock:{chain_id}:{eoa}"), + } + } + + /// Get transaction ID for a given hash + pub async fn get_transaction_id_for_hash( + &self, + hash: &str, + ) -> Result, TransactionStoreError> { + let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); + let mut conn = self.redis.clone(); + + let transaction_id: Option = conn.get(&hash_to_id_key).await?; + Ok(transaction_id) + } + + /// Get transaction data by transaction ID + pub async fn get_transaction_data( + &self, + transaction_id: &str, + ) -> Result, TransactionStoreError> { + let tx_data_key = self.transaction_data_key_name(transaction_id); + let mut conn = self.redis.clone(); + + // Get the hash data (the transaction data is stored as a hash) + let hash_data: HashMap = conn.hgetall(&tx_data_key).await?; + + if hash_data.is_empty() { + return Ok(None); + } + + // Extract user_request from the hash data + let user_request_json = hash_data.get("user_request").ok_or_else(|| { + TransactionStoreError::TransactionNotFound { + transaction_id: transaction_id.to_string(), + } + })?; + + let user_request: EoaTransactionRequest = serde_json::from_str(user_request_json)?; + + // Extract receipt if present + let receipt = hash_data + .get("receipt") + .and_then(|receipt_str| serde_json::from_str(receipt_str).ok()); + + // Extract attempts if present (could be multiple attempt_N fields) + let mut attempts = Vec::new(); + for (key, value) in &hash_data { + if key.starts_with("attempt_") { + if let Ok(attempt) = serde_json::from_str::(value) { + attempts.push(attempt); + } + } + } + + Ok(Some(TransactionData { + transaction_id: transaction_id.to_string(), + user_request, + receipt, + attempts, + })) + } + + /// Mark transaction as successful and remove from submitted + pub async fn succeed_transaction( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + hash: &str, + receipt: &str, + ) -> Result<(), TransactionStoreError> { + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); + let tx_data_key = self.transaction_data_key_name(transaction_id); + let now = chrono::Utc::now(); + + // Remove this hash from submitted + pipeline.zrem(&submitted_key, hash); + + // Remove hash mapping + pipeline.del(&hash_to_id_key); + + // Update transaction data with success + pipeline.hset(&tx_data_key, "completed_at", now.timestamp()); + pipeline.hset(&tx_data_key, "receipt", receipt); + pipeline.hset(&tx_data_key, "status", "confirmed"); + }) + .await + } + + /// Add a gas bump attempt (new hash) to submitted transactions + pub async fn add_gas_bump_attempt( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + nonce: u64, + new_hash: &str, + transaction_id: &str, + attempt_number: u32, + ) -> Result<(), TransactionStoreError> { + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let hash_to_id_key = self.transaction_hash_to_id_key_name(new_hash); + let tx_data_key = self.transaction_data_key_name(transaction_id); + + 
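+            // The submitted zset stores hashes as members scored by nonce, so a gas bump adds a second hash at the same nonce.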
// Add new hash to submitted (keeping old ones) + pipeline.zadd(&submitted_key, new_hash, nonce); + + // Map new hash to transaction ID + pipeline.set(&hash_to_id_key, transaction_id); + + // Record gas bump attempt + let now = chrono::Utc::now(); + let attempt_json = serde_json::json!({ + "attempt_number": attempt_number, + "hash": new_hash, + "gas_bumped_at": now.timestamp(), + "nonce": nonce, + "type": "gas_bump" + }); + pipeline.hset( + &tx_data_key, + format!("attempt_{}", attempt_number), + attempt_json.to_string(), + ); + }) + .await + } + + // ========== SEND FLOW ========== + + /// Get cached transaction count + pub async fn get_cached_transaction_count( + &self, + eoa: Address, + chain_id: u64, + ) -> Result { + let tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let count: Option = conn.get(&tx_count_key).await?; + match count { + Some(count) => Ok(count), + None => Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + } + } + + /// Peek next available nonce (recycled or new) + pub async fn peek_next_available_nonce( + &self, + eoa: Address, + chain_id: u64, + ) -> Result { + // Check recycled nonces first + let recycled = self.peek_recycled_nonces(eoa, chain_id).await?; + if !recycled.is_empty() { + return Ok(NonceType::Recycled(recycled[0])); + } + + // Get next optimistic nonce + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let current_optimistic: Option = conn.get(&optimistic_key).await?; + + match current_optimistic { + Some(nonce) => Ok(NonceType::Incremented(nonce)), + None => Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + } + } +} + +// Additional error types +#[derive(Debug, thiserror::Error)] +pub enum TransactionStoreError { + #[error("Redis error: {0}")] + RedisError(#[from] twmq::redis::RedisError), + + #[error("Serialization error: {0}")] + SerializationError(#[from] serde_json::Error), + + #[error("Transaction not found: {transaction_id}")] + TransactionNotFound { transaction_id: String }, + + #[error("Lost EOA lock: {eoa}:{chain_id} worker: {worker_id}")] + LockLost { + eoa: Address, + chain_id: u64, + worker_id: String, + }, + + #[error("Internal error - worker should quit: {message}")] + InternalError { message: String }, + + #[error("Transaction {transaction_id} not in borrowed state for nonce {nonce}")] + TransactionNotInBorrowedState { transaction_id: String, nonce: u64 }, + + #[error("Hash {hash} not found in submitted transactions")] + HashNotInSubmittedState { hash: String }, + + #[error("Transaction {transaction_id} has no hashes in submitted state")] + TransactionNotInSubmittedState { transaction_id: String }, + + #[error("Nonce {nonce} not available in recycled set")] + NonceNotInRecycledSet { nonce: u64 }, + + #[error("Transaction {transaction_id} not found in pending queue")] + TransactionNotInPendingQueue { transaction_id: String }, + + #[error("Optimistic nonce changed: expected {expected}, found {actual}")] + OptimisticNonceChanged { expected: u64, actual: u64 }, + + #[error("WATCH failed - state changed during operation")] + WatchFailed, + + #[error( + "Nonce synchronization required for {eoa}:{chain_id} - no cached transaction count available" + )] + NonceSyncRequired { eoa: Address, chain_id: u64 }, +} + +const MAX_RETRIES: u32 = 10; +const RETRY_BASE_DELAY_MS: u64 = 10; + +/// Scoped transaction store for a specific EOA, chain, and worker +/// +/// This wrapper eliminates the 
need to repeatedly pass EOA, chain_id, and worker_id +/// to every method call. It provides the same interface as TransactionStore but with +/// these parameters already bound. +/// +/// ## Usage: +/// ```rust +/// let scoped = ScopedTransactionStore::build(store, eoa, chain_id, worker_id).await?; +/// +/// // Much cleaner method calls: +/// scoped.peek_pending_transactions(limit).await?; +/// scoped.move_borrowed_to_submitted(nonce, hash, tx_id, attempt).await?; +/// ``` +pub struct ScopedEoaExecutorStore<'a> { + store: &'a EoaExecutorStore, + eoa: Address, + chain_id: u64, + worker_id: String, +} + +impl<'a> ScopedEoaExecutorStore<'a> { + /// Build a scoped transaction store for a specific EOA, chain, and worker + /// + /// This validates that the worker currently owns the lock for the given EOA/chain. + /// If the lock is not owned, returns a LockLost error. + pub async fn build( + store: &'a EoaExecutorStore, + eoa: Address, + chain_id: u64, + worker_id: String, + ) -> Result { + let lock_key = store.eoa_lock_key_name(eoa, chain_id); + let mut conn = store.redis.clone(); + + // Verify the worker owns the lock + let current_owner: Option = conn.get(&lock_key).await?; + if current_owner.as_deref() != Some(&worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id, + }); + } + + Ok(Self { + store, + eoa, + chain_id, + worker_id, + }) + } + + /// Create a scoped store without lock validation (for read-only operations) + pub fn new_unchecked( + store: &'a EoaExecutorStore, + eoa: Address, + chain_id: u64, + worker_id: String, + ) -> Self { + Self { + store, + eoa, + chain_id, + worker_id, + } + } + + // ========== ATOMIC OPERATIONS ========== + + /// Atomically move specific transaction from pending to borrowed with recycled nonce allocation + pub async fn atomic_move_pending_to_borrowed_with_recycled_nonce( + &self, + transaction_id: &str, + nonce: u64, + prepared_tx: &BorrowedTransactionData, + ) -> Result<(), TransactionStoreError> { + self.store + .atomic_move_pending_to_borrowed_with_recycled_nonce( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_id, + nonce, + prepared_tx, + ) + .await + } + + /// Atomically move specific transaction from pending to borrowed with new nonce allocation + pub async fn atomic_move_pending_to_borrowed_with_new_nonce( + &self, + transaction_id: &str, + expected_nonce: u64, + prepared_tx: &BorrowedTransactionData, + ) -> Result<(), TransactionStoreError> { + self.store + .atomic_move_pending_to_borrowed_with_new_nonce( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_id, + expected_nonce, + prepared_tx, + ) + .await + } + + /// Peek all borrowed transactions without removing them + pub async fn peek_borrowed_transactions( + &self, + ) -> Result, TransactionStoreError> { + self.store + .peek_borrowed_transactions(self.eoa, self.chain_id, &self.worker_id) + .await + } + + /// Atomically move borrowed transaction to submitted state + pub async fn move_borrowed_to_submitted( + &self, + nonce: u64, + hash: &str, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + self.store + .move_borrowed_to_submitted( + self.eoa, + self.chain_id, + &self.worker_id, + nonce, + hash, + transaction_id, + ) + .await + } + + /// Atomically move borrowed transaction back to recycled nonces and pending queue + pub async fn move_borrowed_to_recycled( + &self, + nonce: u64, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + self.store + .move_borrowed_to_recycled( + self.eoa, + self.chain_id, 
+ &self.worker_id, + nonce, + transaction_id, + ) + .await + } + + /// Get all hashes below a certain nonce from submitted transactions + pub async fn get_hashes_below_nonce( + &self, + below_nonce: u64, + ) -> Result, TransactionStoreError> { + self.store + .get_hashes_below_nonce(self.eoa, self.chain_id, below_nonce) + .await + } + + /// Remove all hashes for a transaction and requeue it + pub async fn fail_and_requeue_transaction( + &self, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + self.store + .fail_and_requeue_transaction(self.eoa, self.chain_id, &self.worker_id, transaction_id) + .await + } + + // ========== EOA HEALTH & NONCE MANAGEMENT ========== + + /// Check EOA health (balance, etc.) + pub async fn check_eoa_health(&self) -> Result, TransactionStoreError> { + self.store.check_eoa_health(self.eoa, self.chain_id).await + } + + /// Update EOA health data + pub async fn update_health_data( + &self, + health: &EoaHealth, + ) -> Result<(), TransactionStoreError> { + self.store + .update_health_data(self.eoa, self.chain_id, &self.worker_id, health) + .await + } + + /// Update cached transaction count + pub async fn update_cached_transaction_count( + &self, + transaction_count: u64, + ) -> Result<(), TransactionStoreError> { + self.store + .update_cached_transaction_count( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_count, + ) + .await + } + + /// Peek recycled nonces without removing them + pub async fn peek_recycled_nonces(&self) -> Result, TransactionStoreError> { + self.store + .peek_recycled_nonces(self.eoa, self.chain_id) + .await + } + + /// Nuke all recycled nonces + pub async fn nuke_recycled_nonces(&self) -> Result<(), TransactionStoreError> { + self.store + .nuke_recycled_nonces(self.eoa, self.chain_id, &self.worker_id) + .await + } + + /// Peek at pending transactions without removing them + pub async fn peek_pending_transactions( + &self, + limit: u64, + ) -> Result, TransactionStoreError> { + self.store + .peek_pending_transactions(self.eoa, self.chain_id, limit) + .await + } + + /// Get inflight budget (how many new transactions can be sent) + pub async fn get_inflight_budget( + &self, + max_inflight: u64, + ) -> Result { + self.store + .get_inflight_budget(self.eoa, self.chain_id, max_inflight) + .await + } + + /// Get current optimistic nonce (without incrementing) + pub async fn get_optimistic_nonce(&self) -> Result { + self.store + .get_optimistic_nonce(self.eoa, self.chain_id) + .await + } + + /// Complete safe transaction processing flow combining all atomic operations + pub async fn process_transaction_atomically( + &self, + transaction_id: &str, + signed_tx: &Signed, + ) -> Result<(bool, bool, Option), TransactionStoreError> { + self.store + .process_transaction_atomically( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_id, + signed_tx, + ) + .await + } + + /// Mark transaction as successful and remove from submitted + pub async fn succeed_transaction( + &self, + transaction_id: &str, + hash: &str, + receipt: &str, + ) -> Result<(), TransactionStoreError> { + self.store + .succeed_transaction( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_id, + hash, + receipt, + ) + .await + } + + /// Add a gas bump attempt (new hash) to submitted transactions + pub async fn add_gas_bump_attempt( + &self, + nonce: u64, + new_hash: &str, + transaction_id: &str, + attempt_number: u32, + ) -> Result<(), TransactionStoreError> { + self.store + .add_gas_bump_attempt( + self.eoa, + self.chain_id, + 
&self.worker_id, + nonce, + new_hash, + transaction_id, + attempt_number, + ) + .await + } + + // ========== READ-ONLY OPERATIONS ========== + + /// Get cached transaction count + pub async fn get_cached_transaction_count(&self) -> Result { + self.store + .get_cached_transaction_count(self.eoa, self.chain_id) + .await + } + + /// Peek next available nonce (recycled or new) + pub async fn peek_next_available_nonce(&self) -> Result { + self.store + .peek_next_available_nonce(self.eoa, self.chain_id) + .await + } + + // ========== ACCESSORS ========== + + /// Get the EOA address this store is scoped to + pub fn eoa(&self) -> Address { + self.eoa + } + + /// Get the chain ID this store is scoped to + pub fn chain_id(&self) -> u64 { + self.chain_id + } + + /// Get the worker ID this store is scoped to + pub fn worker_id(&self) -> &str { + &self.worker_id + } + + /// Get a reference to the underlying transaction store + pub fn inner(&self) -> &EoaExecutorStore { + self.store + } + + /// Get transaction data by transaction ID + pub async fn get_transaction_data( + &self, + transaction_id: &str, + ) -> Result, TransactionStoreError> { + self.store.get_transaction_data(transaction_id).await + } +} diff --git a/executors/src/eoa/transaction_store.rs b/executors/src/eoa/transaction_store.rs deleted file mode 100644 index b5e5330..0000000 --- a/executors/src/eoa/transaction_store.rs +++ /dev/null @@ -1,445 +0,0 @@ -use alloy::consensus::{Receipt, Transaction}; -use alloy::network::AnyTransactionReceipt; -use alloy::primitives::{Address, B256, U256}; -use alloy::rpc::types::{TransactionReceipt, TransactionRequest}; -use serde::{Deserialize, Serialize}; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use thiserror::Error; -use twmq::redis::{AsyncCommands, Pipeline, aio::ConnectionManager}; - -#[derive(Debug, Error)] -pub enum TransactionStoreError { - #[error("Redis error: {0}")] - RedisError(#[from] twmq::redis::RedisError), - - #[error("Serialization error: {0}")] - SerializationError(#[from] serde_json::Error), - - #[error("Transaction not found: {transaction_id}")] - TransactionNotFound { transaction_id: String }, -} - -/// Initial transaction data from user request -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TransactionData { - pub transaction_id: String, - pub eoa: Address, - pub chain_id: u64, - pub to: Option
, - pub value: U256, - pub data: Vec, - pub gas_limit: Option, - pub created_at: u64, -} - -/// Active attempt for a transaction (full alloy transaction + metadata) -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ActiveAttempt { - pub transaction_id: String, - pub nonce: U256, - pub transaction_hash: B256, - pub alloy_transaction: TransactionRequest, // Full serializable alloy transaction - pub sent_at: u64, - pub attempt_number: u32, -} - -impl ActiveAttempt { - /// Get the queue job ID for this attempt (includes attempt number) - pub fn queue_job_id(&self) -> String { - format!("{}_{}", self.transaction_id, self.attempt_number) - } -} - -/// Confirmation data for a successful transaction -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ConfirmationData { - pub transaction_hash: B256, - pub confirmed_at: u64, - pub receipt: TransactionReceipt, -} - -/// Transaction store focused on transaction_id operations and nonce indexing -pub struct TransactionStore { - pub redis: ConnectionManager, - pub namespace: Option, -} - -impl TransactionStore { - pub fn new(redis: ConnectionManager, namespace: Option) -> Self { - Self { redis, namespace } - } - - // Redis key methods - fn transaction_data_key(&self, transaction_id: &str) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_tx_data:{}", ns, transaction_id), - None => format!("eoa_tx_data:{}", transaction_id), - } - } - - fn active_attempt_key(&self, transaction_id: &str) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_active_attempt:{}", ns, transaction_id), - None => format!("eoa_active_attempt:{}", transaction_id), - } - } - - fn nonce_to_transactions_key(&self, eoa: Address, chain_id: u64, nonce: U256) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_nonce_to_txs:{}:{}:{}", ns, chain_id, eoa, nonce), - None => format!("eoa_nonce_to_txs:{}:{}:{}", chain_id, eoa, nonce), - } - } - - fn eoa_active_transactions_key(&self, eoa: Address, chain_id: u64) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_active_txs:{}:{}", ns, chain_id, eoa), - None => format!("eoa_active_txs:{}:{}", chain_id, eoa), - } - } - - fn confirmation_key(&self, transaction_id: &str) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_confirmation:{}", ns, transaction_id), - None => format!("eoa_confirmation:{}", transaction_id), - } - } - - fn attempt_counter_key(&self, transaction_id: &str) -> String { - match &self.namespace { - Some(ns) => format!("{}:eoa_attempt_counter:{}", ns, transaction_id), - None => format!("eoa_attempt_counter:{}", transaction_id), - } - } - - /// Store initial transaction data - pub async fn store_transaction_data( - &self, - transaction_data: &TransactionData, - ) -> Result<(), TransactionStoreError> { - let mut conn = self.redis.clone(); - let data_key = self.transaction_data_key(&transaction_data.transaction_id); - let active_key = - self.eoa_active_transactions_key(transaction_data.eoa, transaction_data.chain_id); - - let data_json = serde_json::to_string(transaction_data)?; - - // Store transaction data - let _: () = conn.set(&data_key, data_json).await?; - - // Add to active transactions set - let _: () = conn - .sadd(&active_key, &transaction_data.transaction_id) - .await?; - - Ok(()) - } - - /// Get initial transaction data by transaction ID - pub async fn get_transaction_data( - &self, - transaction_id: &str, - ) -> Result, TransactionStoreError> { - let mut conn = self.redis.clone(); - let data_key = 
self.transaction_data_key(transaction_id); - - let data_json: Option = conn.get(&data_key).await?; - - match data_json { - Some(json) => { - let data: TransactionData = serde_json::from_str(&json)?; - Ok(Some(data)) - } - None => Ok(None), - } - } - - /// Add/update active attempt for a transaction - pub async fn add_active_attempt( - &self, - attempt: &ActiveAttempt, - ) -> Result<(), TransactionStoreError> { - let mut conn = self.redis.clone(); - let attempt_key = self.active_attempt_key(&attempt.transaction_id); - - // Get transaction data to determine EOA and chain_id for indexing - let tx_data = self - .get_transaction_data(&attempt.transaction_id) - .await? - .ok_or_else(|| TransactionStoreError::TransactionNotFound { - transaction_id: attempt.transaction_id.clone(), - })?; - - let nonce_key = - self.nonce_to_transactions_key(tx_data.eoa, tx_data.chain_id, attempt.nonce); - let counter_key = self.attempt_counter_key(&attempt.transaction_id); - - let attempt_json = serde_json::to_string(attempt)?; - - // Store active attempt - let _: () = conn.set(&attempt_key, attempt_json).await?; - - // Index by nonce (multiple transactions can compete for same nonce) - let _: () = conn.sadd(&nonce_key, &attempt.transaction_id).await?; - - // Increment attempt counter for observability - let _: () = conn.incr(&counter_key, 1).await?; - - Ok(()) - } - - /// Get active attempt for a transaction - pub async fn get_active_attempt( - &self, - transaction_id: &str, - ) -> Result, TransactionStoreError> { - let mut conn = self.redis.clone(); - let attempt_key = self.active_attempt_key(transaction_id); - - let attempt_json: Option = conn.get(&attempt_key).await?; - - match attempt_json { - Some(json) => { - let attempt: ActiveAttempt = serde_json::from_str(&json)?; - Ok(Some(attempt)) - } - None => Ok(None), - } - } - - /// Get all transaction IDs competing for a specific nonce - pub async fn get_transactions_by_nonce( - &self, - eoa: Address, - chain_id: u64, - nonce: U256, - ) -> Result, TransactionStoreError> { - let mut conn = self.redis.clone(); - let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, nonce); - - let transaction_ids: Vec = conn.smembers(&nonce_key).await?; - Ok(transaction_ids) - } - - /// Get all active transaction IDs for an EOA - pub async fn get_active_transactions( - &self, - eoa: Address, - chain_id: u64, - ) -> Result, TransactionStoreError> { - let mut conn = self.redis.clone(); - let active_key = self.eoa_active_transactions_key(eoa, chain_id); - - let transaction_ids: Vec = conn.smembers(&active_key).await?; - Ok(transaction_ids) - } - - /// Get all sent transactions (have active attempts) for an EOA - pub async fn get_sent_transactions( - &self, - eoa: Address, - chain_id: u64, - ) -> Result, TransactionStoreError> { - let transaction_ids = self.get_active_transactions(eoa, chain_id).await?; - - let mut sent_transactions = Vec::new(); - for transaction_id in transaction_ids { - if let Some(attempt) = self.get_active_attempt(&transaction_id).await? 
{ - sent_transactions.push((transaction_id, attempt)); - } - } - - Ok(sent_transactions) - } - - /// Mark transaction as confirmed and clean up - pub async fn mark_transaction_confirmed( - &self, - transaction_id: &str, - confirmation_data: &ConfirmationData, - ) -> Result<(), TransactionStoreError> { - let mut conn = self.redis.clone(); - let confirmation_key = self.confirmation_key(transaction_id); - - // Get transaction data to determine EOA and chain_id - let tx_data = self - .get_transaction_data(transaction_id) - .await? - .ok_or_else(|| TransactionStoreError::TransactionNotFound { - transaction_id: transaction_id.to_string(), - })?; - - let active_key = self.eoa_active_transactions_key(tx_data.eoa, tx_data.chain_id); - let attempt_key = self.active_attempt_key(transaction_id); - - // Get current attempt to clean up nonce index - if let Some(attempt) = self.get_active_attempt(transaction_id).await? { - let nonce_key = - self.nonce_to_transactions_key(tx_data.eoa, tx_data.chain_id, attempt.nonce); - let _: () = conn.srem(&nonce_key, transaction_id).await?; - } - - // Store confirmation data - let confirmation_json = serde_json::to_string(confirmation_data)?; - let _: () = conn.set(&confirmation_key, confirmation_json).await?; - - // Remove from active set - let _: () = conn.srem(&active_key, transaction_id).await?; - - // Remove active attempt - let _: () = conn.del(&attempt_key).await?; - - Ok(()) - } - - /// Mark transaction as failed and clean up - pub async fn mark_transaction_failed( - &self, - transaction_id: &str, - error_message: &str, - ) -> Result<(), TransactionStoreError> { - let mut conn = self.redis.clone(); - - // Get transaction data to determine EOA and chain_id - let tx_data = self - .get_transaction_data(transaction_id) - .await? - .ok_or_else(|| TransactionStoreError::TransactionNotFound { - transaction_id: transaction_id.to_string(), - })?; - - let active_key = self.eoa_active_transactions_key(tx_data.eoa, tx_data.chain_id); - let attempt_key = self.active_attempt_key(transaction_id); - - // Get current attempt to clean up nonce index - if let Some(attempt) = self.get_active_attempt(transaction_id).await? { - let nonce_key = - self.nonce_to_transactions_key(tx_data.eoa, tx_data.chain_id, attempt.nonce); - let _: () = conn.srem(&nonce_key, transaction_id).await?; - } - - // Remove from active set - let _: () = conn.srem(&active_key, transaction_id).await?; - - // Remove active attempt - let _: () = conn.del(&attempt_key).await?; - - Ok(()) - } - - /// Remove active attempt (for requeuing after race loss) - pub async fn remove_active_attempt( - &self, - transaction_id: &str, - ) -> Result<(), TransactionStoreError> { - let mut conn = self.redis.clone(); - let attempt_key = self.active_attempt_key(transaction_id); - - // Get current attempt to clean up nonce index - if let Some(attempt) = self.get_active_attempt(transaction_id).await? { - let tx_data = self - .get_transaction_data(transaction_id) - .await? 
- .ok_or_else(|| TransactionStoreError::TransactionNotFound { - transaction_id: transaction_id.to_string(), - })?; - - let nonce_key = - self.nonce_to_transactions_key(tx_data.eoa, tx_data.chain_id, attempt.nonce); - let _: () = conn.srem(&nonce_key, transaction_id).await?; - } - - // Remove active attempt (transaction stays in active set for requeuing) - let _: () = conn.del(&attempt_key).await?; - - Ok(()) - } - - /// Pipeline commands for atomic operations in hooks - pub fn add_store_transaction_command( - &self, - pipeline: &mut Pipeline, - transaction_data: &TransactionData, - ) { - let data_key = self.transaction_data_key(&transaction_data.transaction_id); - let active_key = - self.eoa_active_transactions_key(transaction_data.eoa, transaction_data.chain_id); - - let data_json = serde_json::to_string(transaction_data).unwrap(); - - pipeline.set(&data_key, data_json); - pipeline.sadd(&active_key, &transaction_data.transaction_id); - } - - pub fn add_active_attempt_command( - &self, - pipeline: &mut Pipeline, - attempt: &ActiveAttempt, - eoa: Address, - chain_id: u64, - ) { - let attempt_key = self.active_attempt_key(&attempt.transaction_id); - let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, attempt.nonce); - let counter_key = self.attempt_counter_key(&attempt.transaction_id); - - let attempt_json = serde_json::to_string(attempt).unwrap(); - - pipeline.set(&attempt_key, attempt_json); - pipeline.sadd(&nonce_key, &attempt.transaction_id); - pipeline.incr(&counter_key, 1); - } - - pub fn add_remove_active_attempt_command( - &self, - pipeline: &mut Pipeline, - transaction_id: &str, - eoa: Address, - chain_id: u64, - nonce: U256, - ) { - let attempt_key = self.active_attempt_key(transaction_id); - let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, nonce); - - pipeline.del(&attempt_key); - pipeline.srem(&nonce_key, transaction_id); - } - - pub fn add_mark_confirmed_command( - &self, - pipeline: &mut Pipeline, - transaction_id: &str, - confirmation_data: &ConfirmationData, - eoa: Address, - chain_id: u64, - nonce: U256, - ) { - let confirmation_key = self.confirmation_key(transaction_id); - let active_key = self.eoa_active_transactions_key(eoa, chain_id); - let attempt_key = self.active_attempt_key(transaction_id); - let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, nonce); - - let confirmation_json = serde_json::to_string(confirmation_data).unwrap(); - - pipeline.set(&confirmation_key, confirmation_json); - pipeline.srem(&active_key, transaction_id); - pipeline.del(&attempt_key); - pipeline.srem(&nonce_key, transaction_id); - } - - pub fn add_mark_failed_command( - &self, - pipeline: &mut Pipeline, - transaction_id: &str, - eoa: Address, - chain_id: u64, - nonce: U256, - ) { - let active_key = self.eoa_active_transactions_key(eoa, chain_id); - let attempt_key = self.active_attempt_key(transaction_id); - let nonce_key = self.nonce_to_transactions_key(eoa, chain_id, nonce); - - pipeline.srem(&active_key, transaction_id); - pipeline.del(&attempt_key); - pipeline.srem(&nonce_key, transaction_id); - } -} diff --git a/executors/src/eoa/worker.rs b/executors/src/eoa/worker.rs new file mode 100644 index 0000000..58b018f --- /dev/null +++ b/executors/src/eoa/worker.rs @@ -0,0 +1,986 @@ +use alloy::consensus::{SignableTransaction, Signed, Transaction, TypedTransaction}; +use alloy::network::{TransactionBuilder, TransactionBuilder7702}; +use alloy::primitives::{Address, B256}; +use alloy::providers::Provider; +use alloy::rpc::types::TransactionRequest as 
AlloyTransactionRequest; +use alloy::transports::{RpcError, TransportErrorKind}; +use engine_core::signer::AccountSigner; +use engine_core::{ + chain::{Chain, ChainService}, + error::{AlloyRpcErrorToEngineError, RpcErrorKind}, + signer::{EoaSigner, EoaSigningOptions}, +}; +use hex; +use serde::{Deserialize, Serialize}; +use std::{sync::Arc, time::Duration}; +use tokio::time::sleep; +use twmq::{ + DurableExecution, FailHookData, NackHookData, SuccessHookData, UserCancellable, + error::TwmqError, + hooks::TransactionContext, + job::{BorrowedJob, JobResult, RequeuePosition, ToJobResult}, +}; + +use crate::eoa::store::{ + BorrowedTransactionData, EoaExecutorStore, EoaHealth, ScopedEoaExecutorStore, TransactionData, + TransactionStoreError, +}; + +// ========== JOB DATA ========== +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaExecutorWorkerJobData { + pub eoa_address: Address, + pub chain_id: u64, + pub worker_id: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaExecutorWorkerResult { + pub recovered_transactions: u32, + pub confirmed_transactions: u32, + pub failed_transactions: u32, + pub sent_transactions: u32, +} + +#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] +pub enum EoaExecutorWorkerError { + #[error("Chain service error for chainId {chain_id}: {message}")] + ChainServiceError { chain_id: u64, message: String }, + + #[error("Store error: {message}")] + StoreError { message: String }, + + #[error("RPC error: {message}")] + RpcError { message: String }, + + #[error("Transaction signing failed: {message}")] + SigningError { message: String }, + + #[error("Work still remaining: {message}")] + WorkRemaining { message: String }, + + #[error("Internal error: {message}")] + InternalError { message: String }, + + #[error("User cancelled")] + UserCancelled, +} + +impl From for EoaExecutorWorkerError { + fn from(error: TwmqError) -> Self { + EoaExecutorWorkerError::InternalError { + message: format!("Queue error: {}", error), + } + } +} + +impl From for EoaExecutorWorkerError { + fn from(error: TransactionStoreError) -> Self { + EoaExecutorWorkerError::StoreError { + message: error.to_string(), + } + } +} + +impl UserCancellable for EoaExecutorWorkerError { + fn user_cancelled() -> Self { + EoaExecutorWorkerError::UserCancelled + } +} + +// ========== SIMPLE ERROR CLASSIFICATION ========== +#[derive(Debug)] +enum SendResult { + Success, + PossiblySent, // "nonce too low", "already known" etc + DeterministicFailure, // Invalid signature, malformed tx, insufficient funds etc +} + +fn classify_send_error(error: &RpcError) -> SendResult { + let error_str = error.to_string().to_lowercase(); + + // Transaction possibly made it to mempool + if error_str.contains("nonce too low") + || error_str.contains("already known") + || error_str.contains("replacement transaction underpriced") + { + return SendResult::PossiblySent; + } + + // Clear failures that didn't consume nonce + if error_str.contains("invalid signature") + || error_str.contains("malformed") + || error_str.contains("insufficient funds") + || error_str.contains("gas limit") + || error_str.contains("intrinsic gas too low") + { + return SendResult::DeterministicFailure; + } + + // Default: assume possibly sent for safety + SendResult::PossiblySent +} + +fn is_retryable_rpc_error(kind: &RpcErrorKind) -> bool { + match kind { + 
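+        // HTTP 4xx responses and explicitly unsupported features are permanent failures; other RPC errors may succeed on retry.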
RpcErrorKind::TransportHttpError { status, .. } if *status >= 400 && *status < 500 => false, + RpcErrorKind::UnsupportedFeature { .. } => false, + _ => true, + } +} + +// ========== PREPARED TRANSACTION ========== +#[derive(Debug, Clone)] +struct PreparedTransaction { + transaction_id: String, + signed_tx: Signed, + nonce: u64, +} + +// ========== MAIN WORKER ========== +pub struct EoaExecutorWorker +where + CS: ChainService + Send + Sync + 'static, +{ + pub chain_service: Arc, + pub store: Arc, + pub eoa_signer: Arc, + pub max_inflight: u64, + pub max_recycled_nonces: u64, +} + +impl DurableExecution for EoaExecutorWorker +where + CS: ChainService + Send + Sync + 'static, +{ + type Output = EoaExecutorWorkerResult; + type ErrorData = EoaExecutorWorkerError; + type JobData = EoaExecutorWorkerJobData; + + #[tracing::instrument(skip_all, fields(eoa = %job.job.data.eoa_address, chain_id = job.job.data.chain_id))] + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + let data = &job.job.data; + + // 1. ACQUIRE LOCK AGGRESSIVELY + tracing::info!("Acquiring EOA lock aggressively"); + self.store + .acquire_eoa_lock_aggressively(data.eoa_address, data.chain_id, &data.worker_id) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 2. GET CHAIN + let chain = self + .chain_service + .get_chain(data.chain_id) + .map_err(|e| EoaExecutorWorkerError::ChainServiceError { + chain_id: data.chain_id, + message: format!("Failed to get chain: {}", e), + }) + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 3. CREATE SCOPED STORE (validates lock ownership) + let scoped = ScopedEoaExecutorStore::build( + &self.store, + data.eoa_address, + data.chain_id, + data.worker_id.clone(), + ) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 4. CRASH RECOVERY + let recovered = self + .recover_borrowed_state(&scoped, &chain) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 5. CONFIRM FLOW + let (confirmed, failed) = self + .confirm_flow(&scoped, &chain) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 6. SEND FLOW + let sent = self + .send_flow(&scoped, &chain) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 7. CHECK FOR REMAINING WORK + let pending_count = scoped + .peek_pending_transactions(1000) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)? + .len(); + let borrowed_count = scoped + .peek_borrowed_transactions() + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)? + .len(); + let recycled_count = scoped + .peek_recycled_nonces() + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)? 
+ .len(); + + if pending_count > 0 || borrowed_count > 0 || recycled_count > 0 { + return Err(EoaExecutorWorkerError::WorkRemaining { + message: format!( + "Work remaining: {} pending, {} borrowed, {} recycled", + pending_count, borrowed_count, recycled_count + ), + }) + .map_err_nack(Some(Duration::from_secs(5)), RequeuePosition::Last); + } + + // Only succeed if no work remains + Ok(EoaExecutorWorkerResult { + recovered_transactions: recovered, + confirmed_transactions: confirmed, + failed_transactions: failed, + sent_transactions: sent, + }) + } + + async fn on_success( + &self, + _job: &BorrowedJob, + _success_data: SuccessHookData<'_, Self::Output>, + _tx: &mut TransactionContext<'_>, + ) { + // No additional operations needed for EOA worker success + } + + async fn on_nack( + &self, + _job: &BorrowedJob, + _nack_data: NackHookData<'_, Self::ErrorData>, + _tx: &mut TransactionContext<'_>, + ) { + // No additional operations needed for EOA worker nack + } + + async fn on_fail( + &self, + _job: &BorrowedJob, + _fail_data: FailHookData<'_, Self::ErrorData>, + _tx: &mut TransactionContext<'_>, + ) { + // EOA locks use a takeover pattern - no explicit release needed + // Other workers will forcefully take over the lock when needed + } +} + +impl EoaExecutorWorker +where + CS: ChainService + Send + Sync + 'static, +{ + // ========== CRASH RECOVERY ========== + async fn recover_borrowed_state( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result { + let borrowed_transactions = scoped.peek_borrowed_transactions().await?; + let mut recovered_count = 0; + + for borrowed in borrowed_transactions { + tracing::info!( + transaction_id = %borrowed.transaction_id, + nonce = borrowed.signed_transaction.nonce(), + "Recovering borrowed transaction" + ); + + // Rebroadcast the transaction + let send_result = chain + .provider() + .send_tx_envelope(borrowed.signed_transaction.clone().into()) + .await; + let nonce = borrowed.signed_transaction.nonce(); + + match send_result { + Ok(_) => { + // Transaction was sent successfully + scoped + .move_borrowed_to_submitted( + nonce, + &format!("{:?}", borrowed.hash), + &borrowed.transaction_id, + ) + .await?; + tracing::info!(transaction_id = %borrowed.transaction_id, nonce = nonce, "Moved recovered transaction to submitted"); + } + Err(e) => { + match classify_send_error(&e) { + SendResult::PossiblySent => { + // Transaction possibly sent, move to submitted + scoped + .move_borrowed_to_submitted( + nonce, + &format!("{:?}", borrowed.hash), + &borrowed.transaction_id, + ) + .await?; + tracing::info!(transaction_id = %borrowed.transaction_id, nonce = nonce, "Moved recovered transaction to submitted (possibly sent)"); + } + SendResult::DeterministicFailure => { + // Transaction is broken, recycle nonce and requeue + scoped + .move_borrowed_to_recycled(nonce, &borrowed.transaction_id) + .await?; + tracing::warn!(transaction_id = %borrowed.transaction_id, nonce = nonce, error = %e, "Recycled failed transaction"); + } + SendResult::Success => { + // This case is handled by Ok(_) above + unreachable!() + } + } + } + } + + recovered_count += 1; + } + + Ok(recovered_count) + } + + // ========== CONFIRM FLOW ========== + async fn confirm_flow( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result<(u32, u32), EoaExecutorWorkerError> { + // Get fresh on-chain transaction count + let current_chain_nonce = chain + .provider() + .get_transaction_count(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = 
e.to_engine_error(chain);
+                EoaExecutorWorkerError::RpcError {
+                    message: format!("Failed to get transaction count: {}", engine_error),
+                }
+            })?;
+
+        let cached_nonce = scoped.get_cached_transaction_count().await?;
+
+        if current_chain_nonce == cached_nonce {
+            tracing::debug!("No nonce progress, skipping confirm flow");
+            return Ok((0, 0));
+        }
+
+        tracing::info!(
+            current_chain_nonce = current_chain_nonce,
+            cached_nonce = cached_nonce,
+            "Processing confirmations"
+        );
+
+        // Get all hashes below the current chain nonce
+        let pending_hashes = scoped.get_hashes_below_nonce(current_chain_nonce).await?;
+
+        let mut confirmed_count = 0;
+        let mut failed_count = 0;
+
+        // Process receipts in parallel batches
+        let batch_size = 10;
+        for batch in pending_hashes.chunks(batch_size) {
+            let receipt_futures: Vec<_> = batch
+                .iter()
+                .map(|(nonce, hash)| async {
+                    let receipt = chain
+                        .provider()
+                        .get_transaction_receipt(hash.parse::<B256>().unwrap())
+                        .await;
+                    (*nonce, hash.clone(), receipt)
+                })
+                .collect();
+
+            let results = futures::future::join_all(receipt_futures).await;
+
+            for (nonce, hash, receipt_result) in results {
+                match receipt_result {
+                    Ok(Some(receipt)) => {
+                        // Transaction confirmed!
+                        if let Ok(Some(tx_id)) = self.store.get_transaction_id_for_hash(&hash).await
+                        {
+                            scoped
+                                .succeed_transaction(
+                                    &tx_id,
+                                    &hash,
+                                    &serde_json::to_string(&receipt).unwrap(),
+                                )
+                                .await?;
+                            confirmed_count += 1;
+                            tracing::info!(transaction_id = %tx_id, nonce = nonce, "Transaction confirmed");
+                        }
+                    }
+                    Ok(None) | Err(_) => {
+                        // Transaction failed or dropped
+                        if let Ok(Some(tx_id)) = self.store.get_transaction_id_for_hash(&hash).await
+                        {
+                            scoped.fail_and_requeue_transaction(&tx_id).await?;
+                            failed_count += 1;
+                            tracing::warn!(transaction_id = %tx_id, nonce = nonce, "Transaction failed, requeued");
+                        }
+                    }
+                }
+            }
+        }
+
+        // Update cached transaction count
+        scoped
+            .update_cached_transaction_count(current_chain_nonce)
+            .await?;
+
+        Ok((confirmed_count, failed_count))
+    }
+
+    // ========== SEND FLOW ==========
+    async fn send_flow(
+        &self,
+        scoped: &ScopedEoaExecutorStore<'_>,
+        chain: &impl Chain,
+    ) -> Result<u32, EoaExecutorWorkerError> {
+        // 1. Check and update EOA health
+        self.check_and_update_eoa_health(scoped, chain).await?;
+
+        let health = scoped.check_eoa_health().await?;
+        if health.map(|h| h.balance.is_zero()).unwrap_or(true) {
+            tracing::warn!("EOA has insufficient balance, skipping send flow");
+            return Ok(0);
+        }
+
+        let mut total_sent = 0;
+
+        // 2. Process recycled nonces first
+        total_sent += self.process_recycled_nonces(scoped, chain).await?;
+
+        // 3.
Only proceed to new nonces if we successfully used all recycled nonces + let remaining_recycled = scoped.peek_recycled_nonces().await?.len(); + if remaining_recycled == 0 { + let inflight_budget = scoped.get_inflight_budget(self.max_inflight).await?; + if inflight_budget > 0 { + total_sent += self + .process_new_transactions(scoped, chain, inflight_budget) + .await?; + } + } else { + tracing::warn!( + "Still have {} recycled nonces, not sending new transactions", + remaining_recycled + ); + } + + Ok(total_sent) + } + + async fn process_recycled_nonces( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result { + let recycled_nonces = scoped.peek_recycled_nonces().await?; + + // If too many recycled nonces, nuke them + if recycled_nonces.len() as u64 > self.max_recycled_nonces { + tracing::warn!( + "Too many recycled nonces ({}), nuking them", + recycled_nonces.len() + ); + scoped.nuke_recycled_nonces().await?; + return Ok(0); + } + + if recycled_nonces.is_empty() { + return Ok(0); + } + + // Get pending transactions (one per recycled nonce) + let pending_txs = scoped + .peek_pending_transactions(recycled_nonces.len() as u64) + .await?; + let mut sent_count = 0; + + // Process each recycled nonce sequentially with delays + for (i, nonce) in recycled_nonces.into_iter().enumerate() { + if i > 0 { + sleep(Duration::from_millis(50)).await; // 50ms delay between consecutive nonces + } + + if let Some(tx_id) = pending_txs.get(i) { + // Try to send transaction with this recycled nonce + match self + .send_single_transaction_with_recycled_nonce(scoped, chain, tx_id, nonce) + .await + { + Ok(true) => sent_count += 1, + Ok(false) => {} // Failed to send, but handled + Err(e) => tracing::error!("Error processing recycled nonce {}: {}", nonce, e), + } + } else { + // No pending transactions, send no-op + match self.send_noop_transaction(scoped, chain, nonce).await { + Ok(true) => sent_count += 1, + Ok(false) => {} // Failed to send no-op + Err(e) => tracing::error!("Error sending no-op for nonce {}: {}", nonce, e), + } + } + } + + Ok(sent_count) + } + + async fn process_new_transactions( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + budget: u64, + ) -> Result { + if budget == 0 { + return Ok(0); + } + + // 1. Get pending transactions + let pending_txs = scoped.peek_pending_transactions(budget).await?; + if pending_txs.is_empty() { + return Ok(0); + } + + let optimistic_nonce = scoped.get_optimistic_nonce().await?; + + // 2. Build and sign all transactions in parallel + let build_tasks: Vec<_> = pending_txs + .iter() + .enumerate() + .map(|(i, tx_id)| { + let expected_nonce = optimistic_nonce + i as u64; + self.build_and_sign_single_transaction(scoped, tx_id, expected_nonce, chain) + }) + .collect(); + + let prepared_results = futures::future::join_all(build_tasks).await; + + // 3. 
Move successful transactions to borrowed state serially (to maintain nonce order) + let mut prepared_txs = Vec::new(); + for (i, result) in prepared_results.into_iter().enumerate() { + match result { + Ok(prepared) => { + let borrowed_data = BorrowedTransactionData { + transaction_id: prepared.transaction_id.clone(), + signed_transaction: prepared.signed_tx.clone(), + hash: *prepared.signed_tx.hash(), + borrowed_at: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + }; + + match scoped + .atomic_move_pending_to_borrowed_with_new_nonce( + &prepared.transaction_id, + prepared.nonce, + &borrowed_data, + ) + .await + { + Ok(()) => prepared_txs.push(prepared), + Err(TransactionStoreError::OptimisticNonceChanged { .. }) => { + tracing::debug!( + "Nonce changed for transaction {}, skipping", + prepared.transaction_id + ); + break; // Stop processing if nonce changed + } + Err(TransactionStoreError::TransactionNotInPendingQueue { .. }) => { + tracing::debug!( + "Transaction {} already processed, skipping", + prepared.transaction_id + ); + continue; + } + Err(e) => { + tracing::error!( + "Failed to move transaction {} to borrowed: {}", + prepared.transaction_id, + e + ); + continue; + } + } + } + Err(e) => { + tracing::warn!("Failed to build transaction {}: {}", pending_txs[i], e); + // Individual transaction failure doesn't stop the worker + continue; + } + } + } + + // 4. Send all prepared transactions sequentially with delays + let mut sent_count = 0; + for (i, prepared) in prepared_txs.iter().enumerate() { + if i > 0 { + sleep(Duration::from_millis(50)).await; // 50ms delay between consecutive nonces + } + + match Self::send_prepared_transaction(scoped, chain, prepared).await { + Ok(true) => sent_count += 1, + Ok(false) => {} // Failed to send, but handled + Err(e) => tracing::error!("Error sending transaction: {}", e), + } + } + + Ok(sent_count) + } + + // ========== TRANSACTION BUILDING & SENDING ========== + async fn build_and_sign_single_transaction( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + transaction_id: &str, + nonce: u64, + chain: &impl Chain, + ) -> Result { + // Get transaction data + let tx_data = scoped + .get_transaction_data(transaction_id) + .await? 
+ .ok_or_else(|| EoaExecutorWorkerError::StoreError { + message: format!("Transaction not found: {}", transaction_id), + })?; + + // Build and sign transaction + let signed_tx = self + .build_and_sign_transaction(&tx_data, nonce, chain) + .await?; + + Ok(PreparedTransaction { + transaction_id: transaction_id.to_string(), + signed_tx, + nonce, + }) + } + + async fn send_prepared_transaction( + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + prepared: &PreparedTransaction, + ) -> Result { + // Send to RPC + let send_result = chain + .provider() + .send_tx_envelope(prepared.signed_tx.clone().into()) + .await; + + match send_result { + Ok(_) => { + // Transaction was sent successfully + scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await?; + + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + hash = ?prepared.signed_tx.hash(), + "Successfully sent transaction" + ); + + Ok(true) + } + Err(e) => { + match classify_send_error(&e) { + SendResult::PossiblySent => { + // Move to submitted state + scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await?; + + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + hash = ?prepared.signed_tx.hash(), + "Transaction possibly sent" + ); + + Ok(true) + } + SendResult::DeterministicFailure => { + // Move back to recycled/pending + scoped + .move_borrowed_to_recycled(prepared.nonce, &prepared.transaction_id) + .await?; + + tracing::warn!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + error = %e, + "Transaction failed deterministically, recycled nonce" + ); + + Ok(false) + } + SendResult::Success => { + // This case is handled by Ok(_) above + unreachable!() + } + } + } + } + } + + async fn send_single_transaction_with_recycled_nonce( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + transaction_id: &str, + nonce: u64, + ) -> Result { + // Build and sign transaction + match self + .build_and_sign_single_transaction(scoped, transaction_id, nonce, chain) + .await + { + Ok(prepared) => { + let borrowed_data = BorrowedTransactionData { + transaction_id: transaction_id.to_string(), + signed_transaction: prepared.signed_tx.clone(), + hash: *prepared.signed_tx.hash(), + borrowed_at: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + }; + + // Atomically move from pending to borrowed with recycled nonce + match scoped + .atomic_move_pending_to_borrowed_with_recycled_nonce( + transaction_id, + nonce, + &borrowed_data, + ) + .await + { + Ok(()) => { + // Successfully moved to borrowed, now send + Self::send_prepared_transaction(scoped, chain, &prepared).await + } + Err(TransactionStoreError::NonceNotInRecycledSet { .. }) => { + // Nonce was consumed by another worker + Ok(false) + } + Err(TransactionStoreError::TransactionNotInPendingQueue { .. }) => { + // Transaction was processed by another worker + Ok(false) + } + Err(e) => Err(e.into()), + } + } + Err(EoaExecutorWorkerError::StoreError { .. 
}) => { + // Individual transaction failed (e.g., gas estimation revert) + // Just skip this transaction, don't fail the worker + tracing::warn!( + "Skipping transaction {} due to build failure", + transaction_id + ); + Ok(false) + } + Err(e) => Err(e), // Other errors (RPC, signing) should propagate + } + } + + async fn send_noop_transaction( + &self, + _scoped: &ScopedEoaExecutorStore<'_>, + _chain: &impl Chain, + nonce: u64, + ) -> Result { + // For now, just log that we would send a no-op + // TODO: Implement proper no-op transaction if needed + tracing::info!( + nonce = nonce, + "Would send no-op transaction (not implemented)" + ); + Ok(false) + } + + // ========== HELPER METHODS ========== + async fn check_and_update_eoa_health( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result<(), EoaExecutorWorkerError> { + let current_health = scoped.check_eoa_health().await?; + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(); + + let should_update = current_health + .as_ref() + .map(|h| now - h.balance_fetched_at > 300) // 5 minutes + .unwrap_or(true); + + if should_update { + let balance = chain + .provider() + .get_balance(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = e.to_engine_error(chain); + EoaExecutorWorkerError::RpcError { + message: format!("Failed to get balance: {}", engine_error), + } + })?; + + let health = EoaHealth { + balance, + balance_fetched_at: now, + last_confirmation_at: current_health.as_ref().and_then(|h| h.last_confirmation_at), + nonce_resets: current_health.map(|h| h.nonce_resets).unwrap_or_default(), + }; + + scoped.update_health_data(&health).await?; + } + + Ok(()) + } + + async fn build_and_sign_transaction( + &self, + tx_data: &TransactionData, + nonce: u64, + chain: &impl Chain, + ) -> Result, EoaExecutorWorkerError> { + // Build transaction request from stored data + let mut tx_request = AlloyTransactionRequest::default() + .with_from(tx_data.user_request.from) + .with_value(tx_data.user_request.value) + .with_input(tx_data.user_request.data.clone()) + .with_chain_id(tx_data.user_request.chain_id) + .with_nonce(nonce); + + if let Some(to) = tx_data.user_request.to { + tx_request = tx_request.with_to(to); + } + + if let Some(gas_limit) = tx_data.user_request.gas_limit { + tx_request = tx_request.with_gas_limit(gas_limit); + } + + // Apply transaction type specific settings + if let Some(type_data) = &tx_data.user_request.transaction_type_data { + tx_request = match type_data { + crate::eoa::store::EoaTransactionTypeData::Eip1559(data) => { + let mut req = tx_request; + if let Some(max_fee) = data.max_fee_per_gas { + req = req.with_max_fee_per_gas(max_fee); + } + if let Some(max_priority) = data.max_priority_fee_per_gas { + req = req.with_max_priority_fee_per_gas(max_priority); + } + req + } + crate::eoa::store::EoaTransactionTypeData::Legacy(data) => { + if let Some(gas_price) = data.gas_price { + tx_request.with_gas_price(gas_price) + } else { + tx_request + } + } + crate::eoa::store::EoaTransactionTypeData::Eip7702(data) => { + let mut req = tx_request; + if let Some(authorization_list) = &data.authorization_list { + req = req.with_authorization_list(authorization_list.clone()); + } + if let Some(max_fee) = data.max_fee_per_gas { + req = req.with_max_fee_per_gas(max_fee); + } + if let Some(max_priority) = data.max_priority_fee_per_gas { + req = req.with_max_priority_fee_per_gas(max_priority); + } + req + } + }; + } + + // Estimate gas if needed + if 
tx_request.gas.is_none() { + match chain.provider().estimate_gas(tx_request.clone()).await { + Ok(gas_limit) => { + tx_request = tx_request.with_gas_limit(gas_limit * 110 / 100); // 10% buffer + } + Err(e) => { + // Check if this is a revert + if let RpcError::ErrorResp(error_payload) = &e { + if let Some(revert_data) = error_payload.as_revert_data() { + // This is a revert - the transaction is fundamentally broken + // This should fail the individual transaction, not the worker + return Err(EoaExecutorWorkerError::StoreError { + message: format!( + "Transaction reverted during gas estimation: {} (revert: {})", + error_payload.message, + hex::encode(&revert_data) + ), + }); + } + } + + // Not a revert - could be RPC issue, this should nack the worker + let engine_error = e.to_engine_error(chain); + return Err(EoaExecutorWorkerError::RpcError { + message: format!("Gas estimation failed: {}", engine_error), + }); + } + } + } + + // Build typed transaction + let typed_tx = + tx_request + .build_typed_tx() + .map_err(|e| EoaExecutorWorkerError::StoreError { + message: format!("Failed to build typed transaction: {:?}", e), + })?; + + // Sign transaction + let signing_options = EoaSigningOptions { + from: tx_data.user_request.from, + chain_id: Some(tx_data.user_request.chain_id), + }; + + let signature = self + .eoa_signer + .sign_transaction( + signing_options, + typed_tx.clone(), + tx_data.user_request.signing_credential.clone(), + ) + .await + .map_err(|engine_error| EoaExecutorWorkerError::SigningError { + message: format!("Failed to sign transaction: {}", engine_error), + })?; + + let signed_tx = typed_tx.into_signed(signature.parse().map_err(|e| { + EoaExecutorWorkerError::StoreError { + message: format!("Failed to parse signature: {}", e), + } + })?); + + Ok(signed_tx) + } +} diff --git a/twmq/src/job.rs b/twmq/src/job.rs index 43b37d3..afc382b 100644 --- a/twmq/src/job.rs +++ b/twmq/src/job.rs @@ -56,17 +56,20 @@ pub trait ToJobResult { fn map_err_fail(self) -> JobResult; } -impl ToJobResult for Result { +impl ToJobResult for Result +where + ErrorType: Into, +{ fn map_err_nack(self, delay: Option, position: RequeuePosition) -> JobResult { self.map_err(|e| JobError::Nack { - error: e, + error: e.into(), delay, position, }) } fn map_err_fail(self) -> JobResult { - self.map_err(|e| JobError::Fail(e)) + self.map_err(|e| JobError::Fail(e.into())) } } @@ -156,16 +159,16 @@ impl BorrowedJob { pub fn new(job: Job, lease_token: String) -> Self { Self { job, lease_token } } - + // Convenience methods to access job fields pub fn id(&self) -> &str { &self.job.id } - + pub fn data(&self) -> &T { &self.job.data } - + pub fn attempts(&self) -> u32 { self.job.attempts } From f3d84257f4641f7979199bd344db377b496e831e Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Sun, 6 Jul 2025 21:00:48 +0530 Subject: [PATCH 6/8] new EOA execution model --- executors/src/eoa/store.rs | 411 +++++++++++++++++++++++------------- executors/src/eoa/worker.rs | 39 +++- 2 files changed, 290 insertions(+), 160 deletions(-) diff --git a/executors/src/eoa/store.rs b/executors/src/eoa/store.rs index 3a555d6..13418d5 100644 --- a/executors/src/eoa/store.rs +++ b/executors/src/eoa/store.rs @@ -2,13 +2,219 @@ use alloy::consensus::{Signed, TypedTransaction}; use alloy::eips::eip7702::SignedAuthorization; use alloy::network::AnyTransactionReceipt; use alloy::primitives::{Address, B256, Bytes, U256}; +use chrono; use engine_core::chain::RpcCredentials; use engine_core::credentials::SigningCredential; use 
engine_core::execution_options::WebhookOptions; use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use std::future::Future; use twmq::redis::{AsyncCommands, Pipeline, aio::ConnectionManager}; +pub trait SafeRedisTransaction: Send + Sync { + fn name(&self) -> &str; + fn operation(&self, pipeline: &mut Pipeline); + fn validation( + &self, + conn: &mut ConnectionManager, + ) -> impl Future> + Send; + fn watch_keys(&self) -> Vec; +} + +struct MovePendingToBorrowedWithRecycledNonce { + recycled_key: String, + pending_key: String, + transaction_id: String, + borrowed_key: String, + nonce: u64, + prepared_tx_json: String, +} + +impl SafeRedisTransaction for MovePendingToBorrowedWithRecycledNonce { + fn name(&self) -> &str { + "pending->borrowed with recycled nonce" + } + + fn operation(&self, pipeline: &mut Pipeline) { + // Remove nonce from recycled set (we know it exists) + pipeline.zrem(&self.recycled_key, self.nonce); + // Remove transaction from pending (we know it exists) + pipeline.lrem(&self.pending_key, 0, &self.transaction_id); + // Store borrowed transaction + pipeline.hset(&self.borrowed_key, self.nonce.to_string(), &self.prepared_tx_json); + } + + fn watch_keys(&self) -> Vec { + vec![self.recycled_key.clone(), self.pending_key.clone()] + } + + async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> { + // Check if nonce exists in recycled set + let nonce_score: Option = conn.zscore(&self.recycled_key, self.nonce).await?; + if nonce_score.is_none() { + return Err(TransactionStoreError::NonceNotInRecycledSet { nonce: self.nonce }); + } + + // Check if transaction exists in pending + let pending_transactions: Vec = conn.lrange(&self.pending_key, 0, -1).await?; + if !pending_transactions.contains(&self.transaction_id) { + return Err(TransactionStoreError::TransactionNotInPendingQueue { + transaction_id: self.transaction_id.clone(), + }); + } + + Ok(()) + } +} + +struct MovePendingToBorrowedWithNewNonce { + optimistic_key: String, + pending_key: String, + nonce: u64, + prepared_tx_json: String, + transaction_id: String, + borrowed_key: String, + eoa: Address, + chain_id: u64, +} + +impl SafeRedisTransaction for MovePendingToBorrowedWithNewNonce { + fn name(&self) -> &str { + "pending->borrowed with new nonce" + } + + fn operation(&self, pipeline: &mut Pipeline) { + // Increment optimistic nonce + pipeline.incr(&self.optimistic_key, 1); + // Remove transaction from pending + pipeline.lrem(&self.pending_key, 0, &self.transaction_id); + // Store borrowed transaction + pipeline.hset(&self.borrowed_key, self.nonce.to_string(), &self.prepared_tx_json); + } + + fn watch_keys(&self) -> Vec { + vec![self.optimistic_key.clone(), self.pending_key.clone()] + } + + async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> { + // Check current optimistic nonce + let current_optimistic: Option = conn.get(&self.optimistic_key).await?; + let current_nonce = match current_optimistic { + Some(nonce) => nonce, + None => return Err(TransactionStoreError::NonceSyncRequired { + eoa: self.eoa, + chain_id: self.chain_id + }), + }; + + if current_nonce != self.nonce { + return Err(TransactionStoreError::OptimisticNonceChanged { + expected: self.nonce, + actual: current_nonce, + }); + } + + // Check if transaction exists in pending + let pending_transactions: Vec = conn.lrange(&self.pending_key, 0, -1).await?; + if !pending_transactions.contains(&self.transaction_id) { + return 
Err(TransactionStoreError::TransactionNotInPendingQueue { + transaction_id: self.transaction_id.clone(), + }); + } + + Ok(()) + } +} + +struct MoveBorrowedToSubmitted { + nonce: u64, + hash: String, + transaction_id: String, + borrowed_key: String, + submitted_key: String, + hash_to_id_key: String, +} + +impl SafeRedisTransaction for MoveBorrowedToSubmitted { + fn name(&self) -> &str { + "borrowed->submitted" + } + + fn operation(&self, pipeline: &mut Pipeline) { + // Remove from borrowed (we know it exists) + pipeline.hdel(&self.borrowed_key, self.nonce.to_string()); + + // Add to submitted + pipeline.zadd(&self.submitted_key, self.nonce, &self.hash); + + // Map hash to transaction ID + pipeline.set(&self.hash_to_id_key, &self.transaction_id); + } + + fn watch_keys(&self) -> Vec { + vec![self.borrowed_key.clone()] + } + + async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> { + // Validate that borrowed transaction actually exists + let borrowed_tx: Option = conn + .hget(&self.borrowed_key, self.nonce.to_string()) + .await?; + if borrowed_tx.is_none() { + return Err(TransactionStoreError::TransactionNotInBorrowedState { + transaction_id: self.transaction_id.clone(), + nonce: self.nonce, + }); + } + Ok(()) + } +} + +struct MoveBorrowedToRecycled { + nonce: u64, + transaction_id: String, + borrowed_key: String, + recycled_key: String, + pending_key: String, +} + +impl SafeRedisTransaction for MoveBorrowedToRecycled { + fn name(&self) -> &str { + "borrowed->recycled" + } + + fn operation(&self, pipeline: &mut Pipeline) { + let now = chrono::Utc::now().timestamp_millis(); + + // Remove from borrowed (we know it exists) + pipeline.hdel(&self.borrowed_key, self.nonce.to_string()); + + // Add nonce to recycled set (with timestamp as score) + pipeline.zadd(&self.recycled_key, now, self.nonce); + + // Add transaction back to pending + pipeline.lpush(&self.pending_key, &self.transaction_id); + } + + fn watch_keys(&self) -> Vec { + vec![self.borrowed_key.clone()] + } + + async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> { + // Validate that borrowed transaction actually exists + let borrowed_tx: Option = conn + .hget(&self.borrowed_key, self.nonce.to_string()) + .await?; + if borrowed_tx.is_none() { + return Err(TransactionStoreError::TransactionNotInBorrowedState { + transaction_id: self.transaction_id.clone(), + nonce: self.nonce, + }); + } + Ok(()) + } +} + /// The actual user request data #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] @@ -330,20 +536,13 @@ impl EoaExecutorStore { /// } /// ).await /// ``` - async fn execute_with_watch_and_retry( + async fn execute_with_watch_and_retry( &self, eoa: Address, chain_id: u64, worker_id: &str, - watch_keys: &[String], - operation_name: &str, - validation: V, - operation: F, - ) -> Result<(), TransactionStoreError> - where - V: AsyncFn(&mut ConnectionManager) -> Result<(), TransactionStoreError>, - F: Fn(&mut Pipeline), - { + safe_tx: &impl SafeRedisTransaction, + ) -> Result<(), TransactionStoreError> { let lock_key = self.eoa_lock_key_name(eoa, chain_id); let mut conn = self.redis.clone(); let mut retry_count = 0; @@ -353,7 +552,10 @@ impl EoaExecutorStore { return Err(TransactionStoreError::InternalError { message: format!( "Exceeded max retries ({}) for {} on {}:{}", - MAX_RETRIES, operation_name, eoa, chain_id + MAX_RETRIES, + safe_tx.name(), + eoa, + chain_id ), }); } @@ -367,7 +569,7 @@ impl EoaExecutorStore { delay_ms = 
delay_ms, eoa = %eoa, chain_id = chain_id, - operation = operation_name, + operation = safe_tx.name(), "Retrying atomic operation" ); } @@ -375,7 +577,7 @@ impl EoaExecutorStore { // WATCH all specified keys including lock let mut watch_cmd = twmq::redis::cmd("WATCH"); watch_cmd.arg(&lock_key); - for key in watch_keys { + for key in safe_tx.watch_keys() { watch_cmd.arg(key); } let _: () = watch_cmd.query_async(&mut conn).await?; @@ -392,12 +594,12 @@ impl EoaExecutorStore { } // Execute validation - match validation(&mut conn).await { + match safe_tx.validation(&mut conn).await { Ok(()) => { // Build and execute pipeline let mut pipeline = twmq::redis::pipe(); pipeline.atomic(); - operation(&mut pipeline); + safe_tx.operation(&mut pipeline); match pipeline .query_async::>(&mut conn) @@ -440,51 +642,17 @@ impl EoaExecutorStore { nonce: u64, prepared_tx: &BorrowedTransactionData, ) -> Result<(), TransactionStoreError> { - let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); - let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); - let pending_key = self.pending_transactions_list_name(eoa, chain_id); - let prepared_tx_json = serde_json::to_string(prepared_tx)?; - let transaction_id = transaction_id.to_string(); - - let recyled_key_for_validation = recycled_key.clone(); - let pending_key_for_validation = pending_key.clone(); - let transaction_id_for_validation = transaction_id.clone(); - let borrowed_key_for_validation = borrowed_key.clone(); - - self.execute_with_watch_and_retry( - eoa, - chain_id, - worker_id, - &[recycled_key.clone(), pending_key.clone()], - "pending->borrowed with recycled nonce", - async move |conn: &mut ConnectionManager| { - // Validation phase - check preconditions - let nonce_score: Option = conn.zscore(recycled_key.clone(), nonce).await?; - if nonce_score.is_none() { - return Err(TransactionStoreError::NonceNotInRecycledSet { nonce }); - } - - let pending_transactions: Vec = - conn.lrange(pending_key.clone(), 0, -1).await?; - if !pending_transactions.contains(&transaction_id.clone()) { - return Err(TransactionStoreError::TransactionNotInPendingQueue { - transaction_id: transaction_id.clone(), - }); - } + let safe_tx = MovePendingToBorrowedWithRecycledNonce { + recycled_key: self.recycled_nonces_set_name(eoa, chain_id), + pending_key: self.pending_transactions_list_name(eoa, chain_id), + transaction_id: transaction_id.to_string(), + borrowed_key: self.borrowed_transactions_hashmap_name(eoa, chain_id), + nonce, + prepared_tx_json: serde_json::to_string(prepared_tx)?, + }; - Ok(()) - }, - |pipeline: &mut Pipeline| { - pipeline.zrem(recyled_key_for_validation, nonce); - pipeline.lrem(pending_key_for_validation, 0, transaction_id_for_validation); - pipeline.hset( - borrowed_key_for_validation, - nonce.to_string(), - &prepared_tx_json, - ); - }, - ) - .await?; + self.execute_with_watch_and_retry(eoa, chain_id, worker_id, &safe_tx) + .await?; Ok(()) } @@ -509,44 +677,15 @@ impl EoaExecutorStore { eoa, chain_id, worker_id, - &[optimistic_key.clone(), pending_key.clone()], - "pending->borrowed with new nonce", - async move |conn: &mut ConnectionManager| { - // Check current optimistic nonce - let current_optimistic: Option = conn.get(optimistic_key.clone()).await?; - let current_nonce = match current_optimistic { - Some(nonce) => nonce, - None => return Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), - }; - - if current_nonce != expected_nonce { - return Err(TransactionStoreError::OptimisticNonceChanged { - expected: 
expected_nonce, - actual: current_nonce, - }); - } - - // Check if transaction exists in pending - let pending_transactions: Vec = conn.lrange(&pending_key, 0, -1).await?; - if !pending_transactions.contains(&transaction_id.clone()) { - return Err(TransactionStoreError::TransactionNotInPendingQueue { - transaction_id: transaction_id.clone(), - }); - } - - Ok(()) - }, - move |pipeline| { - // Increment optimistic nonce - pipeline.incr(optimistic_key.clone(), 1); - // Remove transaction from pending - pipeline.lrem(pending_key.clone(), 0, transaction_id.clone()); - // Store borrowed transaction - pipeline.hset( - borrowed_key.clone(), - expected_nonce.to_string(), - &prepared_tx_json, - ); + &MovePendingToBorrowedWithNewNonce { + nonce: expected_nonce, + prepared_tx_json, + transaction_id, + borrowed_key, + optimistic_key, + pending_key, + eoa, + chain_id, }, ) .await @@ -763,7 +902,7 @@ impl EoaExecutorStore { let borrowed_map: HashMap = conn.hgetall(&borrowed_key).await?; let mut result = Vec::new(); - for (nonce_str, transaction_json) in borrowed_map { + for (_nonce_str, transaction_json) in borrowed_map { let borrowed_data: BorrowedTransactionData = serde_json::from_str(&transaction_json)?; result.push(borrowed_data); } @@ -792,30 +931,13 @@ impl EoaExecutorStore { eoa, chain_id, worker_id, - &[borrowed_key.clone()], - "borrowed->submitted", - async |conn: &mut ConnectionManager| { - // Validate that borrowed transaction actually exists - let borrowed_tx: Option = - conn.hget(&borrowed_key, nonce.to_string()).await?; - if borrowed_tx.is_none() { - return Err(TransactionStoreError::TransactionNotInBorrowedState { - transaction_id: transaction_id.clone(), - nonce, - }); - } - - Ok(()) - }, - |pipeline| { - // Remove from borrowed (we know it exists) - pipeline.hdel(&borrowed_key, nonce.to_string()); - - // Add to submitted - pipeline.zadd(&submitted_key, &hash, nonce); - - // Map hash to transaction ID - pipeline.set(&hash_to_id_key, &transaction_id); + &MoveBorrowedToSubmitted { + nonce, + hash: hash.to_string(), + transaction_id, + borrowed_key, + submitted_key, + hash_to_id_key, }, ) .await @@ -840,32 +962,12 @@ impl EoaExecutorStore { eoa, chain_id, worker_id, - &[borrowed_key.clone()], - "borrowed->recycled", - async |conn: &mut ConnectionManager| { - // Validate that borrowed transaction actually exists - let borrowed_tx: Option = - conn.hget(&borrowed_key, nonce.to_string()).await?; - if borrowed_tx.is_none() { - return Err(TransactionStoreError::TransactionNotInBorrowedState { - transaction_id: transaction_id.clone(), - nonce, - }); - } - - Ok(()) - }, - |pipeline| { - let now = chrono::Utc::now().timestamp_millis(); - - // Remove from borrowed (we know it exists) - pipeline.hdel(&borrowed_key, nonce.to_string()); - - // Add nonce to recycled set (with timestamp as score) - pipeline.zadd(&recycled_key, nonce, now); - - // Add transaction back to pending - pipeline.lpush(&pending_key, &transaction_id); + &MoveBorrowedToRecycled { + nonce, + transaction_id, + borrowed_key, + recycled_key, + pending_key, }, ) .await @@ -1103,8 +1205,13 @@ impl EoaExecutorStore { let last_tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); let mut conn = self.redis.clone(); - let optimistic_nonce: Option = conn.get(&optimistic_key).await?; - let last_tx_count: Option = conn.get(&last_tx_count_key).await?; + // Read both values atomically to avoid race conditions + let (optimistic_nonce, last_tx_count): (Option, Option) = + twmq::redis::pipe() + .get(&optimistic_key) + 
.get(&last_tx_count_key) + .query_async(&mut conn) + .await?; let optimistic = match optimistic_nonce { Some(nonce) => nonce, @@ -1154,7 +1261,7 @@ impl EoaExecutorStore { let borrowed_data = BorrowedTransactionData { transaction_id: transaction_id.to_string(), signed_transaction: signed_tx.clone(), - hash: signed_tx.hash().clone(), + hash: *signed_tx.hash(), borrowed_at: chrono::Utc::now().timestamp_millis() as u64, }; @@ -1324,7 +1431,7 @@ impl EoaExecutorStore { let tx_data_key = self.transaction_data_key_name(transaction_id); // Add new hash to submitted (keeping old ones) - pipeline.zadd(&submitted_key, new_hash, nonce); + pipeline.zadd(&submitted_key, nonce, new_hash); // Map new hash to transaction ID pipeline.set(&hash_to_id_key, transaction_id); diff --git a/executors/src/eoa/worker.rs b/executors/src/eoa/worker.rs index 58b018f..7e0a753 100644 --- a/executors/src/eoa/worker.rs +++ b/executors/src/eoa/worker.rs @@ -1,12 +1,13 @@ use alloy::consensus::{SignableTransaction, Signed, Transaction, TypedTransaction}; use alloy::network::{TransactionBuilder, TransactionBuilder7702}; -use alloy::primitives::{Address, B256}; +use alloy::primitives::{Address, B256, Bytes, U256}; use alloy::providers::Provider; use alloy::rpc::types::TransactionRequest as AlloyTransactionRequest; use alloy::transports::{RpcError, TransportErrorKind}; use engine_core::signer::AccountSigner; use engine_core::{ - chain::{Chain, ChainService}, + chain::{Chain, ChainService, RpcCredentials}, + credentials::SigningCredential, error::{AlloyRpcErrorToEngineError, RpcErrorKind}, signer::{EoaSigner, EoaSigningOptions}, }; @@ -22,7 +23,7 @@ use twmq::{ }; use crate::eoa::store::{ - BorrowedTransactionData, EoaExecutorStore, EoaHealth, ScopedEoaExecutorStore, TransactionData, + BorrowedTransactionData, EoaExecutorStore, EoaHealth, EoaTransactionRequest, ScopedEoaExecutorStore, TransactionData, TransactionStoreError, }; @@ -287,7 +288,28 @@ where scoped: &ScopedEoaExecutorStore<'_>, chain: &impl Chain, ) -> Result { - let borrowed_transactions = scoped.peek_borrowed_transactions().await?; + let mut borrowed_transactions = scoped.peek_borrowed_transactions().await?; + + // Sort borrowed transactions by nonce to ensure proper ordering + borrowed_transactions.sort_by_key(|tx| tx.signed_transaction.nonce()); + + // Check for stale borrowed transactions (older than 5 minutes) + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(); + + for borrowed in &borrowed_transactions { + if now - borrowed.borrowed_at > 300 { // 5 minutes + tracing::warn!( + transaction_id = %borrowed.transaction_id, + nonce = borrowed.signed_transaction.nonce(), + age_seconds = now - borrowed.borrowed_at, + "Found stale borrowed transaction - possible worker crash or system issue" + ); + } + } + let mut recovered_count = 0; for borrowed in borrowed_transactions { @@ -809,11 +831,12 @@ where _chain: &impl Chain, nonce: u64, ) -> Result { - // For now, just log that we would send a no-op - // TODO: Implement proper no-op transaction if needed - tracing::info!( + // TODO: Implement proper no-op transaction for recycled nonces + // This requires handling signing credentials and creating atomic operations + // for consuming recycled nonces without pending transactions + tracing::warn!( nonce = nonce, - "Would send no-op transaction (not implemented)" + "No-op transaction not implemented - recycled nonce will remain unconsumed" ); Ok(false) } From e3758fc38612c340043dcf63ab4b93da26147705 Mon Sep 17 00:00:00 
2001 From: Prithvish Baidya Date: Mon, 7 Jul 2025 04:12:51 +0530 Subject: [PATCH 7/8] fixes and parallelism improvements --- Cargo.lock | 471 ++--------- Cargo.toml | 5 + aa-core/Cargo.toml | 6 +- core/Cargo.toml | 4 +- executors/src/eoa/store.rs | 734 ++++++++++------ executors/src/eoa/worker.rs | 1581 ++++++++++++++++++++++++++--------- server/Cargo.toml | 6 +- thirdweb-core/Cargo.toml | 2 +- twmq/src/lib.rs | 118 ++- 9 files changed, 1859 insertions(+), 1068 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 021ef5f..68ec643 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,43 +75,27 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" -[[package]] -name = "alloy" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b064bd1cea105e70557a258cd2b317731896753ec08edf51da2d1fced587b05" -dependencies = [ - "alloy-consensus 0.15.11", - "alloy-core", - "alloy-eips 0.15.11", - "alloy-serde 0.15.11", - "alloy-signer 0.15.11", - "alloy-signer-aws 0.15.11", - "alloy-signer-gcp 0.15.11", - "alloy-signer-ledger 0.15.11", -] - [[package]] name = "alloy" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e0d1aecf3cab3d0e7383064ce488616434b4ade10d8904dff422e74203c712f" dependencies = [ - "alloy-consensus 1.0.17", + "alloy-consensus", "alloy-contract", "alloy-core", - "alloy-eips 1.0.17", + "alloy-eips", "alloy-genesis", - "alloy-json-rpc 1.0.17", - "alloy-network 1.0.17", + "alloy-json-rpc", + "alloy-network", "alloy-provider", "alloy-rpc-client", "alloy-rpc-types", - "alloy-serde 1.0.17", - "alloy-signer 1.0.17", - "alloy-signer-aws 1.0.17", - "alloy-signer-gcp 1.0.17", - "alloy-signer-ledger 1.0.17", + "alloy-serde", + "alloy-signer", + "alloy-signer-aws", + "alloy-signer-gcp", + "alloy-signer-ledger", "alloy-signer-local", "alloy-transport", "alloy-transport-http", @@ -128,41 +112,17 @@ dependencies = [ "strum", ] -[[package]] -name = "alloy-consensus" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c3f3bc4f2a6b725970cd354e78e9738ea1e8961a91898f57bf6317970b1915" -dependencies = [ - "alloy-eips 0.15.11", - "alloy-primitives", - "alloy-rlp", - "alloy-serde 0.15.11", - "alloy-trie 0.8.1", - "auto_impl", - "c-kzg", - "derive_more", - "either", - "k256", - "once_cell", - "rand 0.8.5", - "secp256k1", - "serde", - "serde_with", - "thiserror 2.0.12", -] - [[package]] name = "alloy-consensus" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9c6ad411efe0f49e0e99b9c7d8749a1eb55f6dbf74a1bc6953ab285b02c4f67" dependencies = [ - "alloy-eips 1.0.17", + "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.17", - "alloy-trie 0.9.0", + "alloy-serde", + "alloy-trie", "alloy-tx-macros", "auto_impl", "c-kzg", @@ -177,31 +137,17 @@ dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "alloy-consensus-any" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda014fb5591b8d8d24cab30f52690117d238e52254c6fb40658e91ea2ccd6c3" -dependencies = [ - "alloy-consensus 0.15.11", - "alloy-eips 0.15.11", - "alloy-primitives", - "alloy-rlp", - "alloy-serde 0.15.11", - "serde", -] - [[package]] name = "alloy-consensus-any" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0bf397edad57b696501702d5887e4e14d7d0bbae9fbb6439e148d361f7254f45" dependencies = [ - "alloy-consensus 1.0.17", - "alloy-eips 1.0.17", + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.17", + "alloy-serde", "serde", ] @@ -211,14 +157,14 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "977b97d271159578afcb26e39e1ca5ce1a7f937697793d7d571b0166dd8b8225" dependencies = [ - "alloy-consensus 1.0.17", + "alloy-consensus", "alloy-dyn-abi", "alloy-json-abi", - "alloy-network 1.0.17", - "alloy-network-primitives 1.0.17", + "alloy-network", + "alloy-network-primitives", "alloy-primitives", "alloy-provider", - "alloy-rpc-types-eth 1.0.17", + "alloy-rpc-types-eth", "alloy-sol-types", "alloy-transport", "futures", @@ -293,26 +239,6 @@ dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "alloy-eips" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f7b2f7010581f29bcace81776cf2f0e022008d05a7d326884763f16f3044620" -dependencies = [ - "alloy-eip2124", - "alloy-eip2930", - "alloy-eip7702", - "alloy-primitives", - "alloy-rlp", - "alloy-serde 0.15.11", - "auto_impl", - "c-kzg", - "derive_more", - "either", - "serde", - "sha2", -] - [[package]] name = "alloy-eips" version = "1.0.17" @@ -324,7 +250,7 @@ dependencies = [ "alloy-eip7702", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.17", + "alloy-serde", "auto_impl", "c-kzg", "derive_more", @@ -339,10 +265,10 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fcbae2107f3f2df2b02bb7d9e81e8aa730ae371ca9dd7fd0c81c3d0cb78a452" dependencies = [ - "alloy-eips 1.0.17", + "alloy-eips", "alloy-primitives", - "alloy-serde 1.0.17", - "alloy-trie 0.9.0", + "alloy-serde", + "alloy-trie", "serde", "serde_with", ] @@ -359,20 +285,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "alloy-json-rpc" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca1e31b50f4ed9a83689ae97263d366b15b935a67c4acb5dd46d5b1c3b27e8e6" -dependencies = [ - "alloy-primitives", - "alloy-sol-types", - "serde", - "serde_json", - "thiserror 2.0.12", - "tracing", -] - [[package]] name = "alloy-json-rpc" version = "1.0.17" @@ -388,48 +300,22 @@ dependencies = [ "tracing", ] -[[package]] -name = "alloy-network" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879afc0f4a528908c8fe6935b2ab0bc07f77221a989186f71583f7592831689e" -dependencies = [ - "alloy-consensus 0.15.11", - "alloy-consensus-any 0.15.11", - "alloy-eips 0.15.11", - "alloy-json-rpc 0.15.11", - "alloy-network-primitives 0.15.11", - "alloy-primitives", - "alloy-rpc-types-any 0.15.11", - "alloy-rpc-types-eth 0.15.11", - "alloy-serde 0.15.11", - "alloy-signer 0.15.11", - "alloy-sol-types", - "async-trait", - "auto_impl", - "derive_more", - "futures-utils-wasm", - "serde", - "serde_json", - "thiserror 2.0.12", -] - [[package]] name = "alloy-network" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaeb681024cf71f5ca14f3d812c0a8d8b49f13f7124713538e66d74d3bfe6aff" dependencies = [ - "alloy-consensus 1.0.17", - "alloy-consensus-any 1.0.17", - "alloy-eips 1.0.17", - "alloy-json-rpc 1.0.17", - "alloy-network-primitives 1.0.17", + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", "alloy-primitives", - "alloy-rpc-types-any 1.0.17", - 
"alloy-rpc-types-eth 1.0.17", - "alloy-serde 1.0.17", - "alloy-signer 1.0.17", + "alloy-rpc-types-any", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", "alloy-sol-types", "async-trait", "auto_impl", @@ -440,29 +326,16 @@ dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "alloy-network-primitives" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec185bac9d32df79c1132558a450d48f6db0bfb5adef417dbb1a0258153f879b" -dependencies = [ - "alloy-consensus 0.15.11", - "alloy-eips 0.15.11", - "alloy-primitives", - "alloy-serde 0.15.11", - "serde", -] - [[package]] name = "alloy-network-primitives" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a03ad273e1c55cc481889b4130e82860e33624e6969e9a08854e0f3ebe659295" dependencies = [ - "alloy-consensus 1.0.17", - "alloy-eips 1.0.17", + "alloy-consensus", + "alloy-eips", "alloy-primitives", - "alloy-serde 1.0.17", + "alloy-serde", "serde", ] @@ -500,15 +373,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abc164acf8c41c756e76c7aea3be8f0fb03f8a3ef90a33e3ddcea5d1614d8779" dependencies = [ "alloy-chains", - "alloy-consensus 1.0.17", - "alloy-eips 1.0.17", - "alloy-json-rpc 1.0.17", - "alloy-network 1.0.17", - "alloy-network-primitives 1.0.17", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types-eth 1.0.17", - "alloy-signer 1.0.17", + "alloy-rpc-types-eth", + "alloy-signer", "alloy-sol-types", "alloy-transport", "alloy-transport-http", @@ -561,7 +434,7 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c44d31bcb9afad460915fe1fba004a2af5a07a3376c307b9bdfeec3678c209" dependencies = [ - "alloy-json-rpc 1.0.17", + "alloy-json-rpc", "alloy-primitives", "alloy-transport", "alloy-transport-http", @@ -587,51 +460,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ba2cf3d3c6ece87f1c6bb88324a997f28cf0ad7e98d5e0b6fa91c4003c30916" dependencies = [ "alloy-primitives", - "alloy-rpc-types-eth 1.0.17", - "alloy-serde 1.0.17", + "alloy-rpc-types-eth", + "alloy-serde", "serde", ] -[[package]] -name = "alloy-rpc-types-any" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5a8f1efd77116915dad61092f9ef9295accd0b0b251062390d9c4e81599344" -dependencies = [ - "alloy-consensus-any 0.15.11", - "alloy-rpc-types-eth 0.15.11", - "alloy-serde 0.15.11", -] - [[package]] name = "alloy-rpc-types-any" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef5b22062142ce3b2ed3374337d4b343437e5de6959397f55d2c9fe2c2ce0162" dependencies = [ - "alloy-consensus-any 1.0.17", - "alloy-rpc-types-eth 1.0.17", - "alloy-serde 1.0.17", -] - -[[package]] -name = "alloy-rpc-types-eth" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc1323310d87f9d950fb3ff58d943fdf832f5e10e6f902f405c0eaa954ffbaf1" -dependencies = [ - "alloy-consensus 0.15.11", - "alloy-consensus-any 0.15.11", - "alloy-eips 0.15.11", - "alloy-network-primitives 0.15.11", - "alloy-primitives", - "alloy-rlp", - "alloy-serde 0.15.11", - "alloy-sol-types", - "itertools 0.14.0", - "serde", - "serde_json", - "thiserror 2.0.12", + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", ] [[package]] @@ -640,13 +482,13 @@ version = "1.0.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "391e59f81bacbffc7bddd2da3a26d6eec0e2058e9237c279e9b1052bdf21b49e" dependencies = [ - "alloy-consensus 1.0.17", - "alloy-consensus-any 1.0.17", - "alloy-eips 1.0.17", - "alloy-network-primitives 1.0.17", + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-network-primitives", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.17", + "alloy-serde", "alloy-sol-types", "itertools 0.14.0", "serde", @@ -654,17 +496,6 @@ dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "alloy-serde" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05ace2ef3da874544c3ffacfd73261cdb1405d8631765deb991436a53ec6069" -dependencies = [ - "alloy-primitives", - "serde", - "serde_json", -] - [[package]] name = "alloy-serde" version = "1.0.17" @@ -676,23 +507,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "alloy-signer" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fdabad99ad3c71384867374c60bcd311fc1bb90ea87f5f9c779fd8c7ec36aa" -dependencies = [ - "alloy-dyn-abi", - "alloy-primitives", - "alloy-sol-types", - "async-trait", - "auto_impl", - "either", - "elliptic-curve", - "k256", - "thiserror 2.0.12", -] - [[package]] name = "alloy-signer" version = "1.0.17" @@ -710,34 +524,16 @@ dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "alloy-signer-aws" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7469e93151629b7780d4d37b3adc463376e524af95f1fc1c2877f10837eb3e4" -dependencies = [ - "alloy-consensus 0.15.11", - "alloy-network 0.15.11", - "alloy-primitives", - "alloy-signer 0.15.11", - "async-trait", - "aws-sdk-kms", - "k256", - "spki", - "thiserror 2.0.12", - "tracing", -] - [[package]] name = "alloy-signer-aws" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7942b850ec7be43de89b2680321d7921b7620b25be53b9981aae6fb29daa9e97" dependencies = [ - "alloy-consensus 1.0.17", - "alloy-network 1.0.17", + "alloy-consensus", + "alloy-network", "alloy-primitives", - "alloy-signer 1.0.17", + "alloy-signer", "async-trait", "aws-sdk-kms", "k256", @@ -746,34 +542,16 @@ dependencies = [ "tracing", ] -[[package]] -name = "alloy-signer-gcp" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "110f26e897e58e1fa50c5eeddd3731c9b69d231f20687844a1c031cacc176b15" -dependencies = [ - "alloy-consensus 0.15.11", - "alloy-network 0.15.11", - "alloy-primitives", - "alloy-signer 0.15.11", - "async-trait", - "gcloud-sdk", - "k256", - "spki", - "thiserror 2.0.12", - "tracing", -] - [[package]] name = "alloy-signer-gcp" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74809e45053bd43d24338e618202ebea68d5660aa9632d77b0244faa2dcaa9d1" dependencies = [ - "alloy-consensus 1.0.17", - "alloy-network 1.0.17", + "alloy-consensus", + "alloy-network", "alloy-primitives", - "alloy-signer 1.0.17", + "alloy-signer", "async-trait", "gcloud-sdk", "k256", @@ -782,37 +560,17 @@ dependencies = [ "tracing", ] -[[package]] -name = "alloy-signer-ledger" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3abc91dcedcdd72f950179df239ca838deaf74fe9ec5cd818c9ba682360325a" -dependencies = [ - "alloy-consensus 0.15.11", - "alloy-dyn-abi", - "alloy-network 0.15.11", - "alloy-primitives", - "alloy-signer 0.15.11", - 
"alloy-sol-types", - "async-trait", - "coins-ledger", - "futures-util", - "semver 1.0.26", - "thiserror 2.0.12", - "tracing", -] - [[package]] name = "alloy-signer-ledger" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63c7e67367bc2b1d5790236448d2402865a4f0bc2b53cfda06d71b7ba3dbdffd" dependencies = [ - "alloy-consensus 1.0.17", + "alloy-consensus", "alloy-dyn-abi", - "alloy-network 1.0.17", + "alloy-network", "alloy-primitives", - "alloy-signer 1.0.17", + "alloy-signer", "alloy-sol-types", "async-trait", "coins-ledger", @@ -828,10 +586,10 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14d95902d29e1290809e1c967a1e974145b44b78f6e3e12fc07a60c1225e3df0" dependencies = [ - "alloy-consensus 1.0.17", - "alloy-network 1.0.17", + "alloy-consensus", + "alloy-network", "alloy-primitives", - "alloy-signer 1.0.17", + "alloy-signer", "async-trait", "k256", "rand 0.8.5", @@ -917,7 +675,7 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcdf4b7fc58ebb2605b2fc5a33dae5cf15527ea70476978351cc0db1c596ea93" dependencies = [ - "alloy-json-rpc 1.0.17", + "alloy-json-rpc", "alloy-primitives", "base64 0.22.1", "derive_more", @@ -940,7 +698,7 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c4b0f3a9c28bcd3761504d9eb3578838d6d115c8959fc1ea05f59a3a8f691af" dependencies = [ - "alloy-json-rpc 1.0.17", + "alloy-json-rpc", "alloy-transport", "reqwest", "serde_json", @@ -949,22 +707,6 @@ dependencies = [ "url", ] -[[package]] -name = "alloy-trie" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" -dependencies = [ - "alloy-primitives", - "alloy-rlp", - "arrayvec", - "derive_more", - "nybbles 0.3.4", - "serde", - "smallvec", - "tracing", -] - [[package]] name = "alloy-trie" version = "0.9.0" @@ -975,7 +717,7 @@ dependencies = [ "alloy-rlp", "arrayvec", "derive_more", - "nybbles 0.4.0", + "nybbles", "serde", "smallvec", "tracing", @@ -2390,21 +2132,21 @@ dependencies = [ name = "engine-aa-core" version = "0.1.0" dependencies = [ - "alloy 1.0.17", + "alloy", "engine-aa-types", "engine-core", "serde", "tokio", "tracing", - "vault-sdk 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", - "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", + "vault-sdk", + "vault-types", ] [[package]] name = "engine-aa-types" version = "0.1.0" dependencies = [ - "alloy 1.0.17", + "alloy", "schemars 0.8.22", "serde", "serde_json", @@ -2416,7 +2158,7 @@ dependencies = [ name = "engine-core" version = "0.1.0" dependencies = [ - "alloy 1.0.17", + "alloy", "engine-aa-types", "schemars 0.8.22", "serde", @@ -2429,15 +2171,15 @@ dependencies = [ "twmq", "utoipa", "uuid", - "vault-sdk 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", - "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", + "vault-sdk", + "vault-types", ] [[package]] name = "engine-executors" version = "0.1.0" dependencies = [ - "alloy 1.0.17", + "alloy", "chrono", "engine-aa-core", "engine-aa-types", @@ -3701,19 +3443,6 @@ dependencies = [ "syn 2.0.104", ] -[[package]] -name = "nybbles" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" -dependencies = [ - "alloy-rlp", - "const-hex", - "proptest", - "serde", - "smallvec", -] - [[package]] name = "nybbles" version = "0.4.0" @@ -5339,7 +5068,7 @@ dependencies = [ name = "thirdweb-core" version = "0.1.0" dependencies = [ - "alloy 1.0.17", + "alloy", "engine-aa-types", "moka", "reqwest", @@ -5357,7 +5086,7 @@ name = "thirdweb-engine" version = "0.1.0" dependencies = [ "aide", - "alloy 1.0.17", + "alloy", "anyhow", "axum", "config", @@ -5379,8 +5108,8 @@ dependencies = [ "utoipa", "utoipa-axum", "utoipa-scalar", - "vault-sdk 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main)", - "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main)", + "vault-sdk", + "vault-types", ] [[package]] @@ -5991,33 +5720,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" -[[package]] -name = "vault-sdk" -version = "0.1.0" -source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main#a9d62a85ae69d47b2f341e886d16c12611644235" -dependencies = [ - "alloy 0.15.11", - "chacha20poly1305", - "chrono", - "hex", - "hkdf", - "jsonwebtoken", - "reqwest", - "serde", - "serde_json", - "sha2", - "thiserror 2.0.12", - "uuid", - "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main)", - "x25519-dalek", -] - [[package]] name = "vault-sdk" version = "0.1.0" source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy#b0a72f93335ff05f722c070f32f0697c5478243a" dependencies = [ - "alloy 1.0.17", + "alloy", "chacha20poly1305", "chrono", "hex", @@ -6029,29 +5737,16 @@ dependencies = [ "sha2", "thiserror 2.0.12", "uuid", - "vault-types 0.1.0 (git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy)", + "vault-types", "x25519-dalek", ] -[[package]] -name = "vault-types" -version = "0.1.0" -source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main#a9d62a85ae69d47b2f341e886d16c12611644235" -dependencies = [ - "alloy 0.15.11", - "bincode", - "chrono", - "serde", - "serde_json", - "uuid", -] - [[package]] name = "vault-types" version = "0.1.0" source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy#b0a72f93335ff05f722c070f32f0697c5478243a" dependencies = [ - "alloy 1.0.17", + "alloy", "bincode", "chrono", "serde", diff --git a/Cargo.toml b/Cargo.toml index 05f3fc6..ef7b824 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,3 +9,8 @@ members = [ "twmq", ] resolver = "2" + +[workspace.dependencies] +alloy = { version = "1.0.8" } +vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } +vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } diff --git a/aa-core/Cargo.toml b/aa-core/Cargo.toml index dcd8702..ae93cb9 100644 --- a/aa-core/Cargo.toml +++ b/aa-core/Cargo.toml @@ -4,11 +4,11 @@ version = "0.1.0" edition = "2024" [dependencies] -alloy = { version = "1.0.8", features = ["serde"] } +alloy = { workspace = true, features = ["serde"] } tokio = "1.44.2" engine-aa-types = { path = "../aa-types" } engine-core = { path = "../core" } -vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } -vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } +vault-types = { 
workspace = true } +vault-sdk = { workspace = true } serde = "1.0.219" tracing = "0.1.41" diff --git a/core/Cargo.toml b/core/Cargo.toml index 51d0ea2..5427590 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -10,8 +10,8 @@ schemars = "0.8.22" serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" thiserror = "2.0.12" -vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } -vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } +vault-types = { workspace = true } +vault-sdk = { workspace = true } tower = "0.5.2" tracing = "0.1.41" twmq = { version = "0.1.0", path = "../twmq" } diff --git a/executors/src/eoa/store.rs b/executors/src/eoa/store.rs index 13418d5..90fdbe6 100644 --- a/executors/src/eoa/store.rs +++ b/executors/src/eoa/store.rs @@ -1,4 +1,4 @@ -use alloy::consensus::{Signed, TypedTransaction}; +use alloy::consensus::{Signed, Transaction, TypedTransaction}; use alloy::eips::eip7702::SignedAuthorization; use alloy::network::AnyTransactionReceipt; use alloy::primitives::{Address, B256, Bytes, U256}; @@ -41,7 +41,11 @@ impl SafeRedisTransaction for MovePendingToBorrowedWithRecycledNonce { // Remove transaction from pending (we know it exists) pipeline.lrem(&self.pending_key, 0, &self.transaction_id); // Store borrowed transaction - pipeline.hset(&self.borrowed_key, self.nonce.to_string(), &self.prepared_tx_json); + pipeline.hset( + &self.borrowed_key, + self.nonce.to_string(), + &self.prepared_tx_json, + ); } fn watch_keys(&self) -> Vec { @@ -89,7 +93,11 @@ impl SafeRedisTransaction for MovePendingToBorrowedWithNewNonce { // Remove transaction from pending pipeline.lrem(&self.pending_key, 0, &self.transaction_id); // Store borrowed transaction - pipeline.hset(&self.borrowed_key, self.nonce.to_string(), &self.prepared_tx_json); + pipeline.hset( + &self.borrowed_key, + self.nonce.to_string(), + &self.prepared_tx_json, + ); } fn watch_keys(&self) -> Vec { @@ -101,10 +109,12 @@ impl SafeRedisTransaction for MovePendingToBorrowedWithNewNonce { let current_optimistic: Option = conn.get(&self.optimistic_key).await?; let current_nonce = match current_optimistic { Some(nonce) => nonce, - None => return Err(TransactionStoreError::NonceSyncRequired { - eoa: self.eoa, - chain_id: self.chain_id - }), + None => { + return Err(TransactionStoreError::NonceSyncRequired { + eoa: self.eoa, + chain_id: self.chain_id, + }); + } }; if current_nonce != self.nonce { @@ -143,18 +153,19 @@ impl SafeRedisTransaction for MoveBorrowedToSubmitted { fn operation(&self, pipeline: &mut Pipeline) { // Remove from borrowed (we know it exists) pipeline.hdel(&self.borrowed_key, self.nonce.to_string()); - - // Add to submitted - pipeline.zadd(&self.submitted_key, self.nonce, &self.hash); - - // Map hash to transaction ID + + // Add to submitted with hash:id format + let hash_id_value = format!("{}:{}", self.hash, self.transaction_id); + pipeline.zadd(&self.submitted_key, &hash_id_value, self.nonce); + + // Still maintain hash-to-ID mapping for backward compatibility and external lookups pipeline.set(&self.hash_to_id_key, &self.transaction_id); } fn watch_keys(&self) -> Vec { vec![self.borrowed_key.clone()] } - + async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> { // Validate that borrowed transaction actually exists let borrowed_tx: Option = conn @@ -184,14 +195,12 @@ impl SafeRedisTransaction for MoveBorrowedToRecycled { } fn 
operation(&self, pipeline: &mut Pipeline) { - let now = chrono::Utc::now().timestamp_millis(); - // Remove from borrowed (we know it exists) pipeline.hdel(&self.borrowed_key, self.nonce.to_string()); - + // Add nonce to recycled set (with timestamp as score) - pipeline.zadd(&self.recycled_key, now, self.nonce); - + pipeline.zadd(&self.recycled_key, self.nonce, self.nonce); + // Add transaction back to pending pipeline.lpush(&self.pending_key, &self.transaction_id); } @@ -272,7 +281,7 @@ pub struct EoaSendLegacyJobData { pub struct TransactionAttempt { pub transaction_id: String, pub details: Signed, - pub sent_at: chrono::DateTime, + pub sent_at: u64, // Unix timestamp in milliseconds pub attempt_number: u32, } @@ -303,6 +312,12 @@ impl EoaExecutorStore { } /// Name of the key for the transaction data + /// + /// Transaction data is stored as a Redis HSET with the following fields: + /// - "user_request": JSON string containing EoaTransactionRequest + /// - "receipt": JSON string containing AnyTransactionReceipt (optional) + /// - "status": String status ("confirmed", "failed", etc.) + /// - "completed_at": String Unix timestamp (optional) fn transaction_data_key_name(&self, transaction_id: &str) -> String { match &self.namespace { Some(ns) => format!("{ns}:eoa_executor:tx_data:{transaction_id}"), @@ -310,6 +325,17 @@ impl EoaExecutorStore { } } + /// Name of the list for transaction attempts + /// + /// Attempts are stored as a separate Redis LIST where each element is a JSON blob + /// of a TransactionAttempt. This allows efficient append operations. + fn transaction_attempts_list_name(&self, transaction_id: &str) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:tx_attempts:{transaction_id}"), + None => format!("eoa_executor:tx_attempts:{transaction_id}"), + } + } + /// Name of the list for pending transactions fn pending_transactions_list_name(&self, eoa: Address, chain_id: u64) -> String { match &self.namespace { @@ -318,7 +344,7 @@ impl EoaExecutorStore { } } - /// Name of the zset for submitted transactions. nonce -> hash + /// Name of the zset for submitted transactions. nonce -> hash:id /// Same transaction might appear multiple times in the zset with different nonces/gas prices (and thus different hashes) fn submitted_transactions_zset_name(&self, eoa: Address, chain_id: u64) -> String { match &self.namespace { @@ -335,7 +361,7 @@ impl EoaExecutorStore { } } - /// Name of the hashmap that maps transaction id to borrowed transactions + /// Name of the hashmap that maps `transaction_id` -> `BorrowedTransactionData` /// /// This is used for crash recovery. Before submitting a transaction, we atomically move from pending to this borrowed hashmap. 
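The recurring change across these store hunks is the submitted zset's member format: entries are now written as `hash:transaction_id` (nonce as score), and every reader falls back to the old hash-only format via a hash-to-id lookup. A minimal sketch of that convention, with hypothetical helper names that are not part of the patch:

```rust
/// Encode a submitted-zset member in the new `hash:transaction_id` format.
fn encode_submitted_member(hash: &str, transaction_id: &str) -> String {
    format!("{}:{}", hash, transaction_id)
}

/// Parse a submitted-zset member. New-format members yield the transaction id
/// directly; legacy members (hash only) return `None` and need the separate
/// hash-to-id key, which the patch still maintains for compatibility.
fn parse_submitted_member(member: &str) -> (&str, Option<&str>) {
    match member.split_once(':') {
        Some((hash, tx_id)) => (hash, Some(tx_id)),
        None => (member, None),
    }
}

// "0xabc123...:tx_42" -> ("0xabc123...", Some("tx_42"))
// "0xabc123..."       -> ("0xabc123...", None)
```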
/// @@ -410,16 +436,19 @@ impl EoaExecutorStore { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct EoaHealth { pub balance: U256, + /// Update the balance threshold when we see out of funds errors + pub balance_threshold: U256, pub balance_fetched_at: u64, - pub last_confirmation_at: Option, - pub nonce_resets: Vec, // Last 5 reset timestamps + pub last_confirmation_at: u64, + pub last_nonce_movement_at: u64, // Track when nonce last moved for gas bump detection + pub nonce_resets: Vec, // Last 5 reset timestamps } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BorrowedTransactionData { pub transaction_id: String, pub signed_transaction: Signed, - pub hash: B256, + pub hash: String, pub borrowed_at: u64, } @@ -499,6 +528,54 @@ impl EoaExecutorStore { Ok(()) } + /// Release EOA lock following the spec's finally pattern + pub async fn release_eoa_lock( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + ) -> Result<(), TransactionStoreError> { + // Use existing utility method that handles all the atomic lock checking + match self + .with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + pipeline.del(&lock_key); + }) + .await + { + Ok(()) => { + tracing::debug!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + "Successfully released EOA lock" + ); + Ok(()) + } + Err(TransactionStoreError::LockLost { .. }) => { + // Lock was already taken over, which is fine for release + tracing::debug!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + "Lock already released or taken over by another worker" + ); + Ok(()) + } + Err(e) => { + // Other errors shouldn't fail the worker, just log + tracing::warn!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + error = %e, + "Failed to release EOA lock" + ); + Ok(()) + } + } + } + /// Helper to execute atomic operations with proper retry logic and watch handling /// /// This helper centralizes all the boilerplate for WATCH/MULTI/EXEC operations: @@ -507,35 +584,35 @@ impl EoaExecutorStore { /// - WATCH key management /// - Error handling and UNWATCH cleanup /// - /// ## When to use this helper: - /// - Simple validation that doesn't need to pass data to pipeline phase - /// - Operations that can cleanly separate validation from pipeline commands - /// - Cases where reducing boilerplate is more important than complex data flow + /// ## Usage: + /// Implement the `SafeRedisTransaction` trait for your operation, then call this method. + /// The trait separates validation (async) from pipeline operations (sync) for clean patterns. 
/// - /// ## When NOT to use this helper: - /// - Complex validation that needs to pass computed data to pipeline - /// - Operations requiring custom retry logic - /// - Cases where validation and pipeline phases are tightly coupled + /// ## Example: + /// ```rust + /// let safe_tx = MovePendingToBorrowedWithNewNonce { + /// nonce: expected_nonce, + /// prepared_tx_json, + /// transaction_id, + /// borrowed_key, + /// optimistic_key, + /// pending_key, + /// eoa, + /// chain_id, + /// }; /// - /// ## Example usage: - /// ``` - /// self.execute_with_watch_and_retry( - /// eoa, chain_id, worker_id, - /// &[key1, key2], // Keys to WATCH - /// "operation name", - /// async |conn| { // Validation phase - /// let data = conn.get("key").await?; - /// if !is_valid(data) { - /// return Err(SomeError); - /// } - /// Ok(()) - /// }, - /// |pipeline| { // Pipeline phase - /// pipeline.set("key", "value"); - /// pipeline.incr("counter", 1); - /// } - /// ).await + /// self.execute_with_watch_and_retry(eoa, chain_id, worker_id, &safe_tx).await?; /// ``` + /// + /// ## When to use this helper: + /// - Operations that implement `SafeRedisTransaction` trait + /// - Need atomic WATCH/MULTI/EXEC with retry logic + /// - Want centralized lock checking and error handling + /// + /// ## When NOT to use this helper: + /// - Simple operations that can use `with_lock_check` instead + /// - Operations that don't need WATCH on multiple keys + /// - Read-only operations that don't modify state async fn execute_with_watch_and_retry( &self, eoa: Address, @@ -894,7 +971,6 @@ impl EoaExecutorStore { &self, eoa: Address, chain_id: u64, - worker_id: &str, ) -> Result, TransactionStoreError> { let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); let mut conn = self.redis.clone(); @@ -974,12 +1050,13 @@ impl EoaExecutorStore { } /// Get all hashes below a certain nonce from submitted transactions + /// Returns (nonce, hash, transaction_id) tuples pub async fn get_hashes_below_nonce( &self, eoa: Address, chain_id: u64, below_nonce: u64, - ) -> Result, TransactionStoreError> { + ) -> Result, TransactionStoreError> { let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); let mut conn = self.redis.clone(); @@ -988,10 +1065,57 @@ impl EoaExecutorStore { .zrangebyscore_withscores(&submitted_key, 0, below_nonce - 1) .await?; - Ok(results - .into_iter() - .map(|(hash, nonce)| (nonce, hash)) - .collect()) + let mut parsed_results = Vec::new(); + for (hash_id_value, nonce) in results { + // Parse hash:id format + if let Some((hash, transaction_id)) = hash_id_value.split_once(':') { + parsed_results.push((nonce, hash.to_string(), transaction_id.to_string())); + } else { + // Fallback for old format (just hash) - look up transaction ID + if let Some(transaction_id) = + self.get_transaction_id_for_hash(&hash_id_value).await? 
+ { + parsed_results.push((nonce, hash_id_value, transaction_id)); + } + } + } + + Ok(parsed_results) + } + + /// Get all transaction IDs for a specific nonce + pub async fn get_transaction_ids_for_nonce( + &self, + eoa: Address, + chain_id: u64, + nonce: u64, + ) -> Result, TransactionStoreError> { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // Get all members with the exact nonce + let members: Vec = conn + .zrangebyscore(&submitted_key, nonce, nonce) + .await + .map_err(|e| TransactionStoreError::RedisError { + message: format!("Failed to get transaction IDs for nonce {}: {}", nonce, e), + })?; + + let mut transaction_ids = Vec::new(); + for value in members { + // Parse the value as hash:id format, with fallback to old format + if let Some((_, transaction_id)) = value.split_once(':') { + // New format: hash:id + transaction_ids.push(transaction_id.to_string()); + } else { + // Old format: just hash - look up transaction ID + if let Some(transaction_id) = self.get_transaction_id_for_hash(&value).await? { + transaction_ids.push(transaction_id); + } + } + } + + Ok(transaction_ids) } /// Remove all hashes for a transaction and requeue it @@ -1046,13 +1170,21 @@ impl EoaExecutorStore { } // Find all hashes for this transaction that actually exist in submitted - let all_hashes: Vec = conn.zrange(&submitted_key, 0, -1).await?; + let all_hash_id_values: Vec = conn.zrange(&submitted_key, 0, -1).await?; let mut transaction_hashes = Vec::new(); - for hash in all_hashes { - if let Some(tx_id) = self.get_transaction_id_for_hash(&hash).await? { + for hash_id_value in all_hash_id_values { + // Parse hash:id format + if let Some((hash, tx_id)) = hash_id_value.split_once(':') { if tx_id == transaction_id { - transaction_hashes.push(hash); + transaction_hashes.push(hash.to_string()); + } + } else { + // Fallback for old format (just hash) - look up transaction ID + if let Some(tx_id) = self.get_transaction_id_for_hash(&hash_id_value).await? 
{ + if tx_id == transaction_id { + transaction_hashes.push(hash_id_value); + } } } } @@ -1068,9 +1200,13 @@ impl EoaExecutorStore { let mut pipeline = twmq::redis::pipe(); pipeline.atomic(); - // Remove all hashes for this transaction (we know they exist) + // Remove all hash:id values for this transaction (we know they exist) for hash in &transaction_hashes { - pipeline.zrem(&submitted_key, hash); + // Remove the hash:id value from the zset + let hash_id_value = format!("{}:{}", hash, transaction_id); + pipeline.zrem(&submitted_key, &hash_id_value); + + // Also remove the separate hash-to-ID mapping for backward compatibility let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); pipeline.del(&hash_to_id_key); } @@ -1127,10 +1263,10 @@ impl EoaExecutorStore { worker_id: &str, health: &EoaHealth, ) -> Result<(), TransactionStoreError> { + let health_json = serde_json::to_string(health)?; self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { let health_key = self.eoa_health_key_name(eoa, chain_id); - let health_json = serde_json::to_string(health).unwrap(); - pipeline.set(&health_key, health_json); + pipeline.set(&health_key, &health_json); }) .await } @@ -1159,25 +1295,10 @@ impl EoaExecutorStore { let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); let mut conn = self.redis.clone(); - // Get all nonces ordered by score (timestamp) let nonces: Vec = conn.zrange(&recycled_key, 0, -1).await?; Ok(nonces) } - /// Nuke all recycled nonces - pub async fn nuke_recycled_nonces( - &self, - eoa: Address, - chain_id: u64, - worker_id: &str, - ) -> Result<(), TransactionStoreError> { - self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { - let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); - pipeline.del(&recycled_key); - }) - .await - } - /// Peek at pending transactions without removing them (safe for planning) pub async fn peek_pending_transactions( &self, @@ -1206,12 +1327,11 @@ impl EoaExecutorStore { let mut conn = self.redis.clone(); // Read both values atomically to avoid race conditions - let (optimistic_nonce, last_tx_count): (Option, Option) = - twmq::redis::pipe() - .get(&optimistic_key) - .get(&last_tx_count_key) - .query_async(&mut conn) - .await?; + let (optimistic_nonce, last_tx_count): (Option, Option) = twmq::redis::pipe() + .get(&optimistic_key) + .get(&last_tx_count_key) + .query_async(&mut conn) + .await?; let optimistic = match optimistic_nonce { Some(nonce) => nonce, @@ -1244,79 +1364,6 @@ impl EoaExecutorStore { } } - /// Complete safe transaction processing flow combining all atomic operations - /// Returns (success, used_recycled_nonce, actual_nonce) - /// - /// On specific failures (nonce not available, transaction not in pending), - /// returns success=false. On other errors, propagates the error. 
- pub async fn process_transaction_atomically( - &self, - eoa: Address, - chain_id: u64, - worker_id: &str, - transaction_id: &str, - signed_tx: &Signed, - ) -> Result<(bool, bool, Option), TransactionStoreError> { - // Prepare borrowed transaction data - let borrowed_data = BorrowedTransactionData { - transaction_id: transaction_id.to_string(), - signed_transaction: signed_tx.clone(), - hash: *signed_tx.hash(), - borrowed_at: chrono::Utc::now().timestamp_millis() as u64, - }; - - // Try recycled nonces first - let recycled_nonces = self.peek_recycled_nonces(eoa, chain_id).await?; - if let Some(&nonce) = recycled_nonces.first() { - match self - .atomic_move_pending_to_borrowed_with_recycled_nonce( - eoa, - chain_id, - worker_id, - transaction_id, - nonce, - &borrowed_data, - ) - .await - { - Ok(()) => return Ok((true, true, Some(nonce))), // Success with recycled nonce - Err(TransactionStoreError::NonceNotInRecycledSet { .. }) => { - // Nonce was consumed by another worker, try new nonce - } - Err(TransactionStoreError::TransactionNotInPendingQueue { .. }) => { - // Transaction was processed by another worker - return Ok((false, false, None)); - } - Err(e) => return Err(e), // Other errors propagate - } - } - - // Try new nonce - let expected_nonce = self.get_optimistic_nonce(eoa, chain_id).await?; - match self - .atomic_move_pending_to_borrowed_with_new_nonce( - eoa, - chain_id, - worker_id, - transaction_id, - expected_nonce, - &borrowed_data, - ) - .await - { - Ok(()) => Ok((true, false, Some(expected_nonce))), // Success with new nonce - Err(TransactionStoreError::OptimisticNonceChanged { .. }) => { - // Nonce changed while we were processing, try again - Ok((false, false, None)) - } - Err(TransactionStoreError::TransactionNotInPendingQueue { .. 
}) => { - // Transaction was processed by another worker - Ok((false, false, None)) - } - Err(e) => Err(e), // Other errors propagate - } - } - /// Lock key name for EOA processing fn eoa_lock_key_name(&self, eoa: Address, chain_id: u64) -> String { match &self.namespace { @@ -1366,13 +1413,13 @@ impl EoaExecutorStore { .get("receipt") .and_then(|receipt_str| serde_json::from_str(receipt_str).ok()); - // Extract attempts if present (could be multiple attempt_N fields) + // Extract attempts from separate list + let attempts_key = self.transaction_attempts_list_name(transaction_id); + let attempts_json_list: Vec = conn.lrange(&attempts_key, 0, -1).await?; let mut attempts = Vec::new(); - for (key, value) in &hash_data { - if key.starts_with("attempt_") { - if let Ok(attempt) = serde_json::from_str::(value) { - attempts.push(attempt); - } + for attempt_json in attempts_json_list { + if let Ok(attempt) = serde_json::from_str::(&attempt_json) { + attempts.push(attempt); } } @@ -1398,16 +1445,17 @@ impl EoaExecutorStore { let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); let tx_data_key = self.transaction_data_key_name(transaction_id); - let now = chrono::Utc::now(); + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; - // Remove this hash from submitted - pipeline.zrem(&submitted_key, hash); + // Remove this hash:id from submitted + let hash_id_value = format!("{}:{}", hash, transaction_id); + pipeline.zrem(&submitted_key, &hash_id_value); // Remove hash mapping pipeline.del(&hash_to_id_key); // Update transaction data with success - pipeline.hset(&tx_data_key, "completed_at", now.timestamp()); + pipeline.hset(&tx_data_key, "completed_at", now); pipeline.hset(&tx_data_key, "receipt", receipt); pipeline.hset(&tx_data_key, "status", "confirmed"); }) @@ -1420,36 +1468,115 @@ impl EoaExecutorStore { eoa: Address, chain_id: u64, worker_id: &str, - nonce: u64, - new_hash: &str, transaction_id: &str, - attempt_number: u32, + signed_transaction: Signed, + ) -> Result<(), TransactionStoreError> { + let new_hash = signed_transaction.hash().to_string(); + let nonce = signed_transaction.nonce(); + + // Create new attempt + let new_attempt = TransactionAttempt { + transaction_id: transaction_id.to_string(), + details: signed_transaction, + sent_at: chrono::Utc::now().timestamp_millis().max(0) as u64, + attempt_number: 0, // Will be set correctly when reading all attempts + }; + + // Serialize the new attempt + let attempt_json = serde_json::to_string(&new_attempt)?; + + // Get key names + let attempts_list_key = self.transaction_attempts_list_name(transaction_id); + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let hash_to_id_key = self.transaction_hash_to_id_key_name(&new_hash); + let hash_id_value = format!("{}:{}", new_hash, transaction_id); + + // Now perform the atomic update + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + // Add new hash:id to submitted (keeping old ones) + pipeline.zadd(&submitted_key, &hash_id_value, nonce); + + // Still maintain separate hash-to-ID mapping for backward compatibility + pipeline.set(&hash_to_id_key, transaction_id); + + // Simply push the new attempt to the attempts list + pipeline.lpush(&attempts_list_key, &attempt_json); + }) + .await + } + + /// Efficiently batch fail and requeue multiple transactions + /// This avoids hash-to-ID lookups since we already have both pieces of information + pub async fn 
batch_fail_and_requeue_transactions( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + failures: Vec, ) -> Result<(), TransactionStoreError> { + if failures.is_empty() { + return Ok(()); + } + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); - let hash_to_id_key = self.transaction_hash_to_id_key_name(new_hash); - let tx_data_key = self.transaction_data_key_name(transaction_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); - // Add new hash to submitted (keeping old ones) - pipeline.zadd(&submitted_key, nonce, new_hash); + // Remove all hash:id values from submitted + for failure in &failures { + let hash_id_value = format!("{}:{}", failure.hash, failure.transaction_id); + pipeline.zrem(&submitted_key, &hash_id_value); - // Map new hash to transaction ID - pipeline.set(&hash_to_id_key, transaction_id); + // Remove separate hash-to-ID mapping + let hash_to_id_key = self.transaction_hash_to_id_key_name(&failure.hash); + pipeline.del(&hash_to_id_key); + } - // Record gas bump attempt - let now = chrono::Utc::now(); - let attempt_json = serde_json::json!({ - "attempt_number": attempt_number, - "hash": new_hash, - "gas_bumped_at": now.timestamp(), - "nonce": nonce, - "type": "gas_bump" - }); - pipeline.hset( - &tx_data_key, - format!("attempt_{}", attempt_number), - attempt_json.to_string(), - ); + // Add unique transaction IDs back to pending (avoid duplicates) + let mut unique_tx_ids = std::collections::HashSet::new(); + for failure in &failures { + unique_tx_ids.insert(&failure.transaction_id); + } + + for transaction_id in unique_tx_ids { + pipeline.lpush(&pending_key, transaction_id); + } + }) + .await + } + + /// Efficiently batch succeed multiple transactions + /// This avoids hash-to-ID lookups since we already have both pieces of information + pub async fn batch_succeed_transactions( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + successes: Vec, + ) -> Result<(), TransactionStoreError> { + if successes.is_empty() { + return Ok(()); + } + + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + for success in &successes { + // Remove hash:id from submitted + let hash_id_value = format!("{}:{}", success.hash, success.transaction_id); + pipeline.zrem(&submitted_key, &hash_id_value); + + // Remove separate hash-to-ID mapping + let hash_to_id_key = self.transaction_hash_to_id_key_name(&success.hash); + pipeline.del(&hash_to_id_key); + + // Update transaction data with success (following existing Redis hash pattern) + let tx_data_key = self.transaction_data_key_name(&success.transaction_id); + pipeline.hset(&tx_data_key, "completed_at", now); + pipeline.hset(&tx_data_key, "receipt", &success.receipt_data); + pipeline.hset(&tx_data_key, "status", "confirmed"); + } }) .await } @@ -1494,16 +1621,102 @@ impl EoaExecutorStore { None => Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), } } + + /// Synchronize nonces with the chain + /// + /// Part of standard nonce management flow, called in the confirm stage when chain nonce advances, and we need to update our cached nonce + pub async fn synchronize_nonces_with_chain( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + current_chain_tx_count: u64, + ) -> Result<(), TransactionStoreError> { + let now = 
chrono::Utc::now().timestamp_millis().max(0) as u64; + + // First, read current health data + let current_health = self.check_eoa_health(eoa, chain_id).await?; + + // Prepare health update if health data exists + let health_update = if let Some(mut health) = current_health { + health.last_nonce_movement_at = now; + health.last_confirmation_at = now; + Some(serde_json::to_string(&health)?) + } else { + None + }; + + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); + + // Update cached transaction count + pipeline.set(&tx_count_key, current_chain_tx_count); + + // Update health data only if it exists + if let Some(ref health_json) = health_update { + let health_key = self.eoa_health_key_name(eoa, chain_id); + pipeline.set(&health_key, health_json); + } + }) + .await + } + + /// Reset nonces to specified value + /// + /// This is called when we have too many recycled nonces and detect something wrong + /// We want to start fresh, with the chain nonce as the new optimistic nonce + pub async fn reset_nonces( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + current_chain_tx_count: u64, + ) -> Result<(), TransactionStoreError> { + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + let current_health = self.check_eoa_health(eoa, chain_id).await?; + + // Prepare health update if health data exists + let health_update = if let Some(mut health) = current_health { + health.nonce_resets.push(now); + Some(serde_json::to_string(&health)?) + } else { + None + }; + + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let cached_nonce_key = self.last_transaction_count_key_name(eoa, chain_id); + let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); + + // Update health data only if it exists + if let Some(ref health_json) = health_update { + let health_key = self.eoa_health_key_name(eoa, chain_id); + pipeline.set(&health_key, health_json); + } + + // Reset the optimistic nonce + pipeline.set(&optimistic_key, current_chain_tx_count); + + // Reset the cached nonce + pipeline.set(&cached_nonce_key, current_chain_tx_count); + + // Reset the recycled nonces + pipeline.del(recycled_key); + }) + .await + } } // Additional error types -#[derive(Debug, thiserror::Error)] +#[derive(Debug, thiserror::Error, Serialize, Deserialize, Clone)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] pub enum TransactionStoreError { - #[error("Redis error: {0}")] - RedisError(#[from] twmq::redis::RedisError), + #[error("Redis error: {message}")] + RedisError { message: String }, - #[error("Serialization error: {0}")] - SerializationError(#[from] serde_json::Error), + #[error("Serialization error: {message}")] + DeserError { message: String, text: String }, #[error("Transaction not found: {transaction_id}")] TransactionNotFound { transaction_id: String }, @@ -1545,6 +1758,23 @@ pub enum TransactionStoreError { NonceSyncRequired { eoa: Address, chain_id: u64 }, } +impl From for TransactionStoreError { + fn from(error: twmq::redis::RedisError) -> Self { + TransactionStoreError::RedisError { + message: error.to_string(), + } + } +} + +impl From for TransactionStoreError { + fn from(error: serde_json::Error) -> Self { + TransactionStoreError::DeserError { + message: error.to_string(), + text: error.to_string(), + } + } +} + const MAX_RETRIES: u32 = 10; const RETRY_BASE_DELAY_MS: u64 = 10; @@ -1572,26 
+1802,28 @@ pub struct ScopedEoaExecutorStore<'a> { impl<'a> ScopedEoaExecutorStore<'a> { /// Build a scoped transaction store for a specific EOA, chain, and worker /// - /// This validates that the worker currently owns the lock for the given EOA/chain. - /// If the lock is not owned, returns a LockLost error. + /// This acquires the lock for the given EOA/chain. + /// If the lock is not acquired, returns a LockLost error. + #[tracing::instrument(skip_all, fields(eoa = %eoa, chain_id = chain_id, worker_id = %worker_id))] pub async fn build( store: &'a EoaExecutorStore, eoa: Address, chain_id: u64, worker_id: String, ) -> Result { - let lock_key = store.eoa_lock_key_name(eoa, chain_id); - let mut conn = store.redis.clone(); - - // Verify the worker owns the lock - let current_owner: Option = conn.get(&lock_key).await?; - if current_owner.as_deref() != Some(&worker_id) { - return Err(TransactionStoreError::LockLost { - eoa, - chain_id, - worker_id, - }); - } + // 1. ACQUIRE LOCK AGGRESSIVELY + tracing::info!("Acquiring EOA lock aggressively"); + store + .acquire_eoa_lock_aggressively(eoa, chain_id, &worker_id) + .await + .map_err(|e| { + tracing::error!("Failed to acquire EOA lock: {}", e); + TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.clone(), + } + })?; Ok(Self { store, @@ -1661,7 +1893,7 @@ impl<'a> ScopedEoaExecutorStore<'a> { &self, ) -> Result, TransactionStoreError> { self.store - .peek_borrowed_transactions(self.eoa, self.chain_id, &self.worker_id) + .peek_borrowed_transactions(self.eoa, self.chain_id) .await } @@ -1702,15 +1934,26 @@ impl<'a> ScopedEoaExecutorStore<'a> { } /// Get all hashes below a certain nonce from submitted transactions + /// Returns (nonce, hash, transaction_id) tuples pub async fn get_hashes_below_nonce( &self, below_nonce: u64, - ) -> Result, TransactionStoreError> { + ) -> Result, TransactionStoreError> { self.store .get_hashes_below_nonce(self.eoa, self.chain_id, below_nonce) .await } + /// Get all transaction IDs for a specific nonce + pub async fn get_transaction_ids_for_nonce( + &self, + nonce: u64, + ) -> Result, TransactionStoreError> { + self.store + .get_transaction_ids_for_nonce(self.eoa, self.chain_id, nonce) + .await + } + /// Remove all hashes for a transaction and requeue it pub async fn fail_and_requeue_transaction( &self, @@ -1721,6 +1964,26 @@ impl<'a> ScopedEoaExecutorStore<'a> { .await } + /// Efficiently batch fail and requeue multiple transactions + pub async fn batch_fail_and_requeue_transactions( + &self, + failures: Vec, + ) -> Result<(), TransactionStoreError> { + self.store + .batch_fail_and_requeue_transactions(self.eoa, self.chain_id, &self.worker_id, failures) + .await + } + + /// Efficiently batch succeed multiple transactions + pub async fn batch_succeed_transactions( + &self, + successes: Vec, + ) -> Result<(), TransactionStoreError> { + self.store + .batch_succeed_transactions(self.eoa, self.chain_id, &self.worker_id, successes) + .await + } + // ========== EOA HEALTH & NONCE MANAGEMENT ========== /// Check EOA health (balance, etc.) 
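The scoped-store changes above move lock handling to an acquire-on-build, release-when-done shape: `ScopedEoaExecutorStore::build` force-acquires the per-EOA lock, and `release_eoa_lock` is called once the job finishes. A rough, non-authoritative sketch of that lifecycle using only methods shown in this diff:

```rust
use alloy::primitives::Address;

use crate::eoa::store::{EoaExecutorStore, ScopedEoaExecutorStore, TransactionStoreError};

async fn process_eoa_job(
    store: &EoaExecutorStore,
    eoa: Address,
    chain_id: u64,
    worker_id: &str,
) -> Result<(), TransactionStoreError> {
    // build() aggressively takes the EOA:chain lock; a LockLost error here means
    // another worker currently owns it and the job should be retried later.
    let scoped =
        ScopedEoaExecutorStore::build(store, eoa, chain_id, worker_id.to_string()).await?;

    // ... recover borrowed state, confirm receipts, send pending work ...
    let _pending = scoped.peek_pending_transactions(1000).await?;

    // Release in the "finally" position; release_eoa_lock returns Ok(()) even when
    // the lock was already taken over by another worker.
    store.release_eoa_lock(eoa, chain_id, worker_id).await
}
```

In the worker itself (see the worker.rs changes below), this release happens in the `on_success`/`on_nack`/`on_fail` hooks rather than inline, so the lock is dropped on every job outcome.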
@@ -1760,13 +2023,6 @@ impl<'a> ScopedEoaExecutorStore<'a> { .await } - /// Nuke all recycled nonces - pub async fn nuke_recycled_nonces(&self) -> Result<(), TransactionStoreError> { - self.store - .nuke_recycled_nonces(self.eoa, self.chain_id, &self.worker_id) - .await - } - /// Peek at pending transactions without removing them pub async fn peek_pending_transactions( &self, @@ -1794,23 +2050,6 @@ impl<'a> ScopedEoaExecutorStore<'a> { .await } - /// Complete safe transaction processing flow combining all atomic operations - pub async fn process_transaction_atomically( - &self, - transaction_id: &str, - signed_tx: &Signed, - ) -> Result<(bool, bool, Option), TransactionStoreError> { - self.store - .process_transaction_atomically( - self.eoa, - self.chain_id, - &self.worker_id, - transaction_id, - signed_tx, - ) - .await - } - /// Mark transaction as successful and remove from submitted pub async fn succeed_transaction( &self, @@ -1833,24 +2072,35 @@ impl<'a> ScopedEoaExecutorStore<'a> { /// Add a gas bump attempt (new hash) to submitted transactions pub async fn add_gas_bump_attempt( &self, - nonce: u64, - new_hash: &str, transaction_id: &str, - attempt_number: u32, + signed_transaction: Signed, ) -> Result<(), TransactionStoreError> { self.store .add_gas_bump_attempt( self.eoa, self.chain_id, &self.worker_id, - nonce, - new_hash, transaction_id, - attempt_number, + signed_transaction, ) .await } + pub async fn synchronize_nonces_with_chain( + &self, + nonce: u64, + ) -> Result<(), TransactionStoreError> { + self.store + .synchronize_nonces_with_chain(self.eoa, self.chain_id, &self.worker_id, nonce) + .await + } + + pub async fn reset_nonces(&self, nonce: u64) -> Result<(), TransactionStoreError> { + self.store + .reset_nonces(self.eoa, self.chain_id, &self.worker_id, nonce) + .await + } + // ========== READ-ONLY OPERATIONS ========== /// Get cached transaction count diff --git a/executors/src/eoa/worker.rs b/executors/src/eoa/worker.rs index 7e0a753..9adc9d4 100644 --- a/executors/src/eoa/worker.rs +++ b/executors/src/eoa/worker.rs @@ -1,9 +1,15 @@ -use alloy::consensus::{SignableTransaction, Signed, Transaction, TypedTransaction}; +use alloy::consensus::{ + SignableTransaction, Signed, Transaction, TxEip4844Variant, TxEip4844WithSidecar, + TypedTransaction, +}; use alloy::network::{TransactionBuilder, TransactionBuilder7702}; +use alloy::primitives::utils::Unit; use alloy::primitives::{Address, B256, Bytes, U256}; use alloy::providers::Provider; use alloy::rpc::types::TransactionRequest as AlloyTransactionRequest; +use alloy::signers::Signature; use alloy::transports::{RpcError, TransportErrorKind}; +use engine_core::error::EngineError; use engine_core::signer::AccountSigner; use engine_core::{ chain::{Chain, ChainService, RpcCredentials}, @@ -23,10 +29,18 @@ use twmq::{ }; use crate::eoa::store::{ - BorrowedTransactionData, EoaExecutorStore, EoaHealth, EoaTransactionRequest, ScopedEoaExecutorStore, TransactionData, - TransactionStoreError, + BorrowedTransactionData, EoaExecutorStore, EoaHealth, EoaTransactionRequest, + ScopedEoaExecutorStore, TransactionData, TransactionStoreError, }; +// ========== SPEC-COMPLIANT CONSTANTS ========== +const MAX_INFLIGHT_PER_EOA: u64 = 100; // Default from spec +const MAX_RECYCLED_THRESHOLD: u64 = 50; // Circuit breaker from spec +const TARGET_TRANSACTIONS_PER_EOA: u64 = 10; // Fleet management from spec +const MIN_TRANSACTIONS_PER_EOA: u64 = 1; // Fleet management from spec +const HEALTH_CHECK_INTERVAL: u64 = 300; // 5 minutes in seconds +const 
NONCE_STALL_TIMEOUT: u64 = 300_000; // 5 minutes in milliseconds - after this time, attempt gas bump + // ========== JOB DATA ========== #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] @@ -52,13 +66,37 @@ pub enum EoaExecutorWorkerError { ChainServiceError { chain_id: u64, message: String }, #[error("Store error: {message}")] - StoreError { message: String }, + StoreError { + message: String, + inner_error: TransactionStoreError, + }, + + #[error("Transaction not found: {transaction_id}")] + TransactionNotFound { transaction_id: String }, + + #[error("Transaction simulation failed: {message}")] + TransactionSimulationFailed { + message: String, + inner_error: EngineError, + }, + + #[error("Transaction build failed: {message}")] + TransactionBuildFailed { message: String }, #[error("RPC error: {message}")] - RpcError { message: String }, + RpcError { + message: String, + inner_error: EngineError, + }, + + #[error("Signature parsing failed: {message}")] + SignatureParsingFailed { message: String }, #[error("Transaction signing failed: {message}")] - SigningError { message: String }, + SigningError { + message: String, + inner_error: EngineError, + }, #[error("Work still remaining: {message}")] WorkRemaining { message: String }, @@ -82,6 +120,7 @@ impl From for EoaExecutorWorkerError { fn from(error: TransactionStoreError) -> Self { EoaExecutorWorkerError::StoreError { message: error.to_string(), + inner_error: error, } } } @@ -94,35 +133,76 @@ impl UserCancellable for EoaExecutorWorkerError { // ========== SIMPLE ERROR CLASSIFICATION ========== #[derive(Debug)] -enum SendResult { - Success, +enum SendErrorClassification { PossiblySent, // "nonce too low", "already known" etc DeterministicFailure, // Invalid signature, malformed tx, insufficient funds etc } -fn classify_send_error(error: &RpcError) -> SendResult { +#[derive(PartialEq, Eq, Debug)] +enum SendContext { + Rebroadcast, + InitialBroadcast, +} + +#[tracing::instrument(skip_all, fields(error = %error, context = ?context))] +fn classify_send_error( + error: &RpcError, + context: SendContext, +) -> SendErrorClassification { + if !error.is_error_resp() { + return SendErrorClassification::DeterministicFailure; + } + let error_str = error.to_string().to_lowercase(); - // Transaction possibly made it to mempool + // Deterministic failures that didn't consume nonce (spec-compliant) + if error_str.contains("invalid signature") + || error_str.contains("malformed transaction") + || (context == SendContext::InitialBroadcast && error_str.contains("insufficient funds")) + || error_str.contains("invalid transaction format") + || error_str.contains("nonce too high") + // Should trigger nonce reset + { + return SendErrorClassification::DeterministicFailure; + } + + // Transaction possibly made it to mempool (spec-compliant) if error_str.contains("nonce too low") || error_str.contains("already known") || error_str.contains("replacement transaction underpriced") { - return SendResult::PossiblySent; + return SendErrorClassification::PossiblySent; } - // Clear failures that didn't consume nonce - if error_str.contains("invalid signature") - || error_str.contains("malformed") - || error_str.contains("insufficient funds") + // Additional common failures that didn't consume nonce + if error_str.contains("malformed") || error_str.contains("gas limit") || error_str.contains("intrinsic gas too low") { - return SendResult::DeterministicFailure; + return SendErrorClassification::DeterministicFailure; } + tracing::warn!( + 
"Unknown send error: {}. PLEASE REPORT FOR ADDING CORRECT CLASSIFICATION [NOTIFY]", + error_str + ); + // Default: assume possibly sent for safety - SendResult::PossiblySent + SendErrorClassification::PossiblySent +} + +fn should_trigger_nonce_reset(error: &RpcError) -> bool { + let error_str = error.to_string().to_lowercase(); + + // "nonce too high" should trigger nonce reset as per spec + error_str.contains("nonce too high") +} + +fn should_update_balance_threshold(error: &RpcError) -> bool { + let error_str = error.to_string().to_lowercase(); + + // "insufficient funds" should update the balance threshold + error_str.contains("insufficient funds") } fn is_retryable_rpc_error(kind: &RpcErrorKind) -> bool { @@ -141,7 +221,59 @@ struct PreparedTransaction { nonce: u64, } +// ========== CONFIRMATION FLOW DATA STRUCTURES ========== +#[derive(Debug, Clone)] +struct PendingTransaction { + nonce: u64, + hash: String, + transaction_id: String, +} + +#[derive(Debug, Clone)] +struct ConfirmedTransaction { + nonce: u64, + hash: String, + transaction_id: String, + receipt: alloy::rpc::types::TransactionReceipt, +} + +#[derive(Debug, Clone)] +struct FailedTransaction { + hash: String, + transaction_id: String, +} + +// ========== STORE BATCH OPERATION TYPES ========== +#[derive(Debug, Clone)] +pub struct TransactionSuccess { + pub hash: String, + pub transaction_id: String, + pub receipt_data: String, +} + +#[derive(Debug, Clone)] +pub struct TransactionFailure { + pub hash: String, + pub transaction_id: String, +} + // ========== MAIN WORKER ========== +/// EOA Executor Worker +/// +/// ## Core Workflow: +/// 1. **Acquire Lock Aggressively** - Takes over stalled workers using force acquisition. This is a lock over EOA:CHAIN +/// 2. **Crash Recovery** - Rebroadcasts borrowed transactions, handles deterministic failures +/// 3. **Confirmation Flow** - Fetches receipts, confirms transactions, handles nonce sync, requeues replaced transactions +/// 4. **Send Flow** - Processes recycled nonces first, then new transactions with in-flight budget control +/// 5. **Lock Release** - Explicit release in finally pattern as per spec +/// +/// ## Key Features: +/// - **Atomic Operations**: All state transitions use Redis WATCH/MULTI/EXEC for durability +/// - **Borrowed State**: Mid-send crash recovery with atomic pending->borrowed->submitted transitions +/// - **Nonce Management**: Optimistic nonce tracking with recycled nonce priority +/// - **Error Classification**: Spec-compliant deterministic vs. possibly-sent error handling +/// - **Circuit Breakers**: Automatic recycled nonce nuking when threshold exceeded +/// - **Health Monitoring**: Balance checking with configurable thresholds pub struct EoaExecutorWorker where CS: ChainService + Send + Sync + 'static, @@ -149,8 +281,8 @@ where pub chain_service: Arc, pub store: Arc, pub eoa_signer: Arc, - pub max_inflight: u64, - pub max_recycled_nonces: u64, + pub max_inflight: u64, // Note: Spec uses MAX_INFLIGHT_PER_EOA constant + pub max_recycled_nonces: u64, // Note: Spec uses MAX_RECYCLED_THRESHOLD constant } impl DurableExecution for EoaExecutorWorker @@ -168,14 +300,7 @@ where ) -> JobResult { let data = &job.job.data; - // 1. ACQUIRE LOCK AGGRESSIVELY - tracing::info!("Acquiring EOA lock aggressively"); - self.store - .acquire_eoa_lock_aggressively(data.eoa_address, data.chain_id, &data.worker_id) - .await - .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; - - // 2. GET CHAIN + // 1. 
GET CHAIN let chain = self .chain_service .get_chain(data.chain_id) @@ -185,7 +310,7 @@ where }) .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; - // 3. CREATE SCOPED STORE (validates lock ownership) + // 2. CREATE SCOPED STORE (acquires lock) let scoped = ScopedEoaExecutorStore::build( &self.store, data.eoa_address, @@ -195,25 +320,90 @@ where .await .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; - // 4. CRASH RECOVERY + // initiate health data if doesn't exist + self.get_eoa_health(&scoped, &chain) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // Execute main workflow with proper error handling + self.execute_main_workflow(&scoped, &chain).await + } + + async fn on_success( + &self, + job: &BorrowedJob, + _success_data: SuccessHookData<'_, Self::Output>, + _tx: &mut TransactionContext<'_>, + ) { + // Release EOA lock on success + self.release_eoa_lock( + job.job.data.eoa_address, + job.job.data.chain_id, + &job.job.data.worker_id, + ) + .await; + } + + async fn on_nack( + &self, + job: &BorrowedJob, + _nack_data: NackHookData<'_, Self::ErrorData>, + _tx: &mut TransactionContext<'_>, + ) { + // Release EOA lock on nack + self.release_eoa_lock( + job.job.data.eoa_address, + job.job.data.chain_id, + &job.job.data.worker_id, + ) + .await; + } + + async fn on_fail( + &self, + job: &BorrowedJob, + _fail_data: FailHookData<'_, Self::ErrorData>, + _tx: &mut TransactionContext<'_>, + ) { + // Release EOA lock on fail + self.release_eoa_lock( + job.job.data.eoa_address, + job.job.data.chain_id, + &job.job.data.worker_id, + ) + .await; + } +} + +impl EoaExecutorWorker +where + CS: ChainService + Send + Sync + 'static, +{ + /// Execute the main EOA worker workflow + async fn execute_main_workflow( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> JobResult { + // 1. CRASH RECOVERY let recovered = self - .recover_borrowed_state(&scoped, &chain) + .recover_borrowed_state(scoped, chain) .await .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; - // 5. CONFIRM FLOW + // 2. CONFIRM FLOW let (confirmed, failed) = self - .confirm_flow(&scoped, &chain) + .confirm_flow(scoped, chain) .await .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; - // 6. SEND FLOW + // 3. SEND FLOW let sent = self - .send_flow(&scoped, &chain) + .send_flow(scoped, chain) .await .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; - // 7. CHECK FOR REMAINING WORK + // 4. CHECK FOR REMAINING WORK let pending_count = scoped .peek_pending_transactions(1000) .await @@ -230,6 +420,7 @@ where .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)? 
.len(); + // NACK here is a yield, when you think of the queue as a distributed EOA scheduler if pending_count > 0 || borrowed_count > 0 || recycled_count > 0 { return Err(EoaExecutorWorkerError::WorkRemaining { message: format!( @@ -237,7 +428,7 @@ where pending_count, borrowed_count, recycled_count ), }) - .map_err_nack(Some(Duration::from_secs(5)), RequeuePosition::Last); + .map_err_nack(Some(Duration::from_secs(2)), RequeuePosition::Last); } // Only succeed if no work remains @@ -249,39 +440,19 @@ where }) } - async fn on_success( - &self, - _job: &BorrowedJob, - _success_data: SuccessHookData<'_, Self::Output>, - _tx: &mut TransactionContext<'_>, - ) { - // No additional operations needed for EOA worker success - } - - async fn on_nack( - &self, - _job: &BorrowedJob, - _nack_data: NackHookData<'_, Self::ErrorData>, - _tx: &mut TransactionContext<'_>, - ) { - // No additional operations needed for EOA worker nack - } - - async fn on_fail( - &self, - _job: &BorrowedJob, - _fail_data: FailHookData<'_, Self::ErrorData>, - _tx: &mut TransactionContext<'_>, - ) { - // EOA locks use a takeover pattern - no explicit release needed - // Other workers will forcefully take over the lock when needed + /// Release EOA lock following the spec's finally pattern + async fn release_eoa_lock(&self, eoa: Address, chain_id: u64, worker_id: &str) { + if let Err(e) = self.store.release_eoa_lock(eoa, chain_id, worker_id).await { + tracing::error!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + error = %e, + "Failed to release EOA lock" + ); + } } -} -impl EoaExecutorWorker -where - CS: ChainService + Send + Sync + 'static, -{ // ========== CRASH RECOVERY ========== async fn recover_borrowed_state( &self, @@ -289,41 +460,45 @@ where chain: &impl Chain, ) -> Result { let mut borrowed_transactions = scoped.peek_borrowed_transactions().await?; - + + if borrowed_transactions.is_empty() { + return Ok(0); + } + + tracing::warn!( + "Recovering {} borrowed transactions. 
This indicates a worker crash or system issue", + borrowed_transactions.len() + ); + // Sort borrowed transactions by nonce to ensure proper ordering borrowed_transactions.sort_by_key(|tx| tx.signed_transaction.nonce()); - - // Check for stale borrowed transactions (older than 5 minutes) - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(); - - for borrowed in &borrowed_transactions { - if now - borrowed.borrowed_at > 300 { // 5 minutes - tracing::warn!( - transaction_id = %borrowed.transaction_id, - nonce = borrowed.signed_transaction.nonce(), - age_seconds = now - borrowed.borrowed_at, - "Found stale borrowed transaction - possible worker crash or system issue" + + // Rebroadcast all transactions in parallel + let rebroadcast_futures: Vec<_> = borrowed_transactions + .iter() + .map(|borrowed| { + let tx_envelope = borrowed.signed_transaction.clone().into(); + let nonce = borrowed.signed_transaction.nonce(); + let transaction_id = borrowed.transaction_id.clone(); + + tracing::info!( + transaction_id = %transaction_id, + nonce = nonce, + "Recovering borrowed transaction" ); - } - } - - let mut recovered_count = 0; - for borrowed in borrowed_transactions { - tracing::info!( - transaction_id = %borrowed.transaction_id, - nonce = borrowed.signed_transaction.nonce(), - "Recovering borrowed transaction" - ); + async move { + let send_result = chain.provider().send_tx_envelope(tx_envelope).await; + (borrowed, send_result) + } + }) + .collect(); - // Rebroadcast the transaction - let send_result = chain - .provider() - .send_tx_envelope(borrowed.signed_transaction.clone().into()) - .await; + let rebroadcast_results = futures::future::join_all(rebroadcast_futures).await; + + // Process results sequentially for Redis state changes + let mut recovered_count = 0; + for (borrowed, send_result) in rebroadcast_results { let nonce = borrowed.signed_transaction.nonce(); match send_result { @@ -339,8 +514,8 @@ where tracing::info!(transaction_id = %borrowed.transaction_id, nonce = nonce, "Moved recovered transaction to submitted"); } Err(e) => { - match classify_send_error(&e) { - SendResult::PossiblySent => { + match classify_send_error(&e, SendContext::Rebroadcast) { + SendErrorClassification::PossiblySent => { // Transaction possibly sent, move to submitted scoped .move_borrowed_to_submitted( @@ -351,16 +526,26 @@ where .await?; tracing::info!(transaction_id = %borrowed.transaction_id, nonce = nonce, "Moved recovered transaction to submitted (possibly sent)"); } - SendResult::DeterministicFailure => { + SendErrorClassification::DeterministicFailure => { // Transaction is broken, recycle nonce and requeue scoped .move_borrowed_to_recycled(nonce, &borrowed.transaction_id) .await?; tracing::warn!(transaction_id = %borrowed.transaction_id, nonce = nonce, error = %e, "Recycled failed transaction"); - } - SendResult::Success => { - // This case is handled by Ok(_) above - unreachable!() + + if should_update_balance_threshold(&e) { + self.update_balance_threshold(scoped, chain).await?; + } + + // Check if this should trigger nonce reset + if should_trigger_nonce_reset(&e) { + tracing::warn!( + eoa = %scoped.eoa(), + chain_id = %scoped.chain_id(), + "Nonce too high error detected, may need nonce synchronization" + ); + // The next confirm_flow will fetch fresh nonce and auto-sync + } } } } @@ -387,12 +572,48 @@ where let engine_error = e.to_engine_error(chain); EoaExecutorWorkerError::RpcError { message: format!("Failed to get transaction count: {}", 
engine_error), + inner_error: engine_error, } })?; - let cached_nonce = scoped.get_cached_transaction_count().await?; + let cached_nonce = match scoped.get_cached_transaction_count().await { + Err(e) => match e { + TransactionStoreError::NonceSyncRequired { .. } => { + scoped.reset_nonces(current_chain_nonce).await?; + current_chain_nonce + } + _ => return Err(e.into()), + }, + Ok(cached_nonce) => cached_nonce, + }; + // no nonce progress if current_chain_nonce == cached_nonce { + let current_health = self.get_eoa_health(scoped, chain).await?; + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + // No nonce progress - check if we should attempt gas bumping for stalled nonce + let time_since_movement = now.saturating_sub(current_health.last_nonce_movement_at); + + if time_since_movement > NONCE_STALL_TIMEOUT { + tracing::info!( + time_since_movement = time_since_movement, + stall_timeout = NONCE_STALL_TIMEOUT, + current_chain_nonce = current_chain_nonce, + "Nonce has been stalled, attempting gas bump" + ); + + // Attempt gas bump for the next expected nonce + if let Err(e) = self + .attempt_gas_bump_for_stalled_nonce(scoped, chain, current_chain_nonce) + .await + { + tracing::warn!( + error = %e, + "Failed to attempt gas bump for stalled nonce" + ); + } + } + tracing::debug!("No nonce progress, skipping confirm flow"); return Ok((0, 0)); } @@ -403,63 +624,104 @@ where "Processing confirmations" ); - // Get all hashes below the current chain nonce - let pending_hashes = scoped.get_hashes_below_nonce(current_chain_nonce).await?; - - let mut confirmed_count = 0; - let mut failed_count = 0; - - // Process receipts in parallel batches - let batch_size = 10; - for batch in pending_hashes.chunks(batch_size) { - let receipt_futures: Vec<_> = batch - .iter() - .map(|(nonce, hash)| async { - let receipt = chain - .provider() - .get_transaction_receipt(hash.parse::().unwrap()) - .await; - (*nonce, hash.clone(), receipt) - }) - .collect(); + // Get all pending transactions below the current chain nonce + let pending_txs = self + .get_pending_transactions_below_nonce(scoped, current_chain_nonce) + .await?; - let results = futures::future::join_all(receipt_futures).await; + if pending_txs.is_empty() { + tracing::debug!("No pending transactions to confirm"); + return Ok((0, 0)); + } - for (nonce, hash, receipt_result) in results { - match receipt_result { - Ok(Some(receipt)) => { - // Transaction confirmed! 
- if let Ok(Some(tx_id)) = self.store.get_transaction_id_for_hash(&hash).await - { - scoped - .succeed_transaction( - &tx_id, - &hash, - &serde_json::to_string(&receipt).unwrap(), - ) - .await?; - confirmed_count += 1; - tracing::info!(transaction_id = %tx_id, nonce = nonce, "Transaction confirmed"); + // Fetch receipts and categorize transactions + let (confirmed_txs, failed_txs) = self + .fetch_and_categorize_transactions(chain, pending_txs) + .await; + + // Process confirmed transactions + let confirmed_count = if !confirmed_txs.is_empty() { + let successes: Vec = confirmed_txs + .into_iter() + .map(|tx| { + let receipt_data = match serde_json::to_string(&tx.receipt) { + Ok(receipt_json) => receipt_json, + Err(e) => { + tracing::warn!( + transaction_id = %tx.transaction_id, + hash = %tx.hash, + error = %e, + "Failed to serialize receipt as JSON, using debug format" + ); + format!("{:?}", tx.receipt) } + }; + + tracing::info!( + transaction_id = %tx.transaction_id, + nonce = tx.nonce, + hash = %tx.hash, + "Transaction confirmed" + ); + + TransactionSuccess { + hash: tx.hash, + transaction_id: tx.transaction_id, + receipt_data, } - Ok(None) | Err(_) => { - // Transaction failed or dropped - if let Ok(Some(tx_id)) = self.store.get_transaction_id_for_hash(&hash).await - { - scoped.fail_and_requeue_transaction(&tx_id).await?; - failed_count += 1; - tracing::warn!(transaction_id = %tx_id, nonce = nonce, "Transaction failed, requeued"); - } + }) + .collect(); + + let count = successes.len() as u32; + scoped.batch_succeed_transactions(successes).await?; + count + } else { + 0 + }; + + // Process failed transactions + let failed_count = if !failed_txs.is_empty() { + let failures: Vec = failed_txs + .into_iter() + .map(|tx| { + tracing::warn!( + transaction_id = %tx.transaction_id, + hash = %tx.hash, + "Transaction failed, requeued" + ); + TransactionFailure { + hash: tx.hash, + transaction_id: tx.transaction_id, } - } - } - } + }) + .collect(); + + let count = failures.len() as u32; + scoped.batch_fail_and_requeue_transactions(failures).await?; + count + } else { + 0 + }; // Update cached transaction count scoped .update_cached_transaction_count(current_chain_nonce) .await?; + // Synchronize nonces to ensure consistency + if let Err(e) = self + .store + .synchronize_nonces_with_chain( + scoped.eoa(), + scoped.chain_id(), + scoped.worker_id(), + current_chain_nonce, + ) + .await + { + tracing::warn!(error = %e, "Failed to synchronize nonces with chain"); + } + Ok((confirmed_count, failed_count)) } @@ -469,12 +731,34 @@ where scoped: &ScopedEoaExecutorStore<'_>, chain: &impl Chain, ) -> Result { - // 1. Check and update EOA health - self.check_and_update_eoa_health(scoped, chain).await?; + // 1. 
Get EOA health (initializes if needed) and check if we should update balance + let mut health = self.get_eoa_health(scoped, chain).await?; + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; - let health = scoped.check_eoa_health().await?; - if health.map(|h| h.balance.is_zero()).unwrap_or(true) { - tracing::warn!("EOA has insufficient balance, skipping send flow"); + // Update balance if it's stale + if now - health.balance_fetched_at > HEALTH_CHECK_INTERVAL { + let balance = chain + .provider() + .get_balance(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = e.to_engine_error(chain); + EoaExecutorWorkerError::RpcError { + message: format!("Failed to get balance: {}", engine_error), + inner_error: engine_error, + } + })?; + + health.balance = balance; + health.balance_fetched_at = now; + scoped.update_health_data(&health).await?; + } + + if health.balance < health.balance_threshold { + tracing::warn!( + "EOA has insufficient balance (< {} wei), skipping send flow", + health.balance_threshold + ); return Ok(0); } @@ -509,16 +793,6 @@ where ) -> Result { let recycled_nonces = scoped.peek_recycled_nonces().await?; - // If too many recycled nonces, nuke them - if recycled_nonces.len() as u64 > self.max_recycled_nonces { - tracing::warn!( - "Too many recycled nonces ({}), nuking them", - recycled_nonces.len() - ); - scoped.nuke_recycled_nonces().await?; - return Ok(0); - } - if recycled_nonces.is_empty() { return Ok(0); } @@ -527,30 +801,213 @@ where let pending_txs = scoped .peek_pending_transactions(recycled_nonces.len() as u64) .await?; - let mut sent_count = 0; - // Process each recycled nonce sequentially with delays + // 1. SEQUENTIAL REDIS: Collect nonce-transaction pairs + let mut nonce_tx_pairs = Vec::new(); for (i, nonce) in recycled_nonces.into_iter().enumerate() { - if i > 0 { - sleep(Duration::from_millis(50)).await; // 50ms delay between consecutive nonces - } - if let Some(tx_id) = pending_txs.get(i) { - // Try to send transaction with this recycled nonce - match self - .send_single_transaction_with_recycled_nonce(scoped, chain, tx_id, nonce) - .await - { - Ok(true) => sent_count += 1, - Ok(false) => {} // Failed to send, but handled - Err(e) => tracing::error!("Error processing recycled nonce {}: {}", nonce, e), + // Get transaction data + if let Some(tx_data) = scoped.get_transaction_data(tx_id).await? { + nonce_tx_pairs.push((nonce, tx_id.clone(), tx_data)); + } else { + tracing::warn!("Transaction data not found for {}", tx_id); + continue; } } else { - // No pending transactions, send no-op - match self.send_noop_transaction(scoped, chain, nonce).await { - Ok(true) => sent_count += 1, - Ok(false) => {} // Failed to send no-op - Err(e) => tracing::error!("Error sending no-op for nonce {}: {}", nonce, e), + // No pending transactions - skip recycled nonces without pending transactions + tracing::debug!("No pending transaction for recycled nonce {}", nonce); + continue; + } + } + + if nonce_tx_pairs.is_empty() { + return Ok(0); + } + + // 2. PARALLEL BUILD/SIGN: Build and sign all transactions in parallel + let build_futures: Vec<_> = nonce_tx_pairs + .iter() + .map(|(nonce, transaction_id, tx_data)| async move { + let prepared = self + .build_and_sign_transaction(tx_data, *nonce, chain) + .await; + (*nonce, transaction_id, prepared) + }) + .collect(); + + let build_results = futures::future::join_all(build_futures).await; + + // 3. 
SEQUENTIAL REDIS: Move successfully built transactions to borrowed state + let mut prepared_txs = Vec::new(); + for (nonce, transaction_id, build_result) in build_results { + match build_result { + Ok(signed_tx) => { + let borrowed_data = BorrowedTransactionData { + transaction_id: transaction_id.clone(), + signed_transaction: signed_tx.clone(), + hash: signed_tx.hash().to_string(), + borrowed_at: chrono::Utc::now().timestamp_millis().max(0) as u64, + }; + + // Try to atomically move from pending to borrowed with recycled nonce + match scoped + .atomic_move_pending_to_borrowed_with_recycled_nonce( + transaction_id, + nonce, + &borrowed_data, + ) + .await + { + Ok(()) => { + let prepared = PreparedTransaction { + transaction_id: transaction_id.clone(), + signed_tx, + nonce, + }; + prepared_txs.push(prepared); + } + Err(TransactionStoreError::NonceNotInRecycledSet { .. }) => { + tracing::debug!("Nonce {} was consumed by another worker", nonce); + continue; + } + Err(TransactionStoreError::TransactionNotInPendingQueue { .. }) => { + tracing::debug!("Transaction {} already processed", transaction_id); + continue; + } + Err(e) => { + tracing::error!("Failed to move {} to borrowed: {}", transaction_id, e); + continue; + } + } + } + Err(e) => { + tracing::warn!("Failed to build transaction {}: {}", transaction_id, e); + continue; + } + } + } + + if prepared_txs.is_empty() { + return Ok(0); + } + + // 4. PARALLEL SEND: Send all transactions in parallel + let send_futures: Vec<_> = prepared_txs + .iter() + .map(|prepared| async move { + let result = chain + .provider() + .send_tx_envelope(prepared.signed_tx.clone().into()) + .await; + (prepared, result) + }) + .collect(); + + let send_results = futures::future::join_all(send_futures).await; + + // 5. SEQUENTIAL REDIS: Process results and update states + let mut sent_count = 0; + for (prepared, send_result) in send_results { + match send_result { + Ok(_) => { + // Transaction sent successfully + match scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await + { + Ok(()) => { + sent_count += 1; + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + hash = ?prepared.signed_tx.hash(), + "Successfully sent recycled transaction" + ); + } + Err(e) => { + tracing::error!( + "Failed to move {} to submitted: {}", + prepared.transaction_id, + e + ); + } + } + } + Err(e) => { + match classify_send_error(&e, SendContext::InitialBroadcast) { + SendErrorClassification::PossiblySent => { + // Move to submitted state + match scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await + { + Ok(()) => { + sent_count += 1; + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + "Recycled transaction possibly sent" + ); + } + Err(e) => { + tracing::error!( + "Failed to move {} to submitted: {}", + prepared.transaction_id, + e + ); + } + } + } + SendErrorClassification::DeterministicFailure => { + // Recycle nonce and requeue transaction + match scoped + .move_borrowed_to_recycled(prepared.nonce, &prepared.transaction_id) + .await + { + Ok(()) => { + tracing::warn!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + error = %e, + "Recycled transaction failed, re-recycled nonce" + ); + + if should_update_balance_threshold(&e) { + if let Err(e) = + self.update_balance_threshold(scoped, chain).await + { + 
tracing::error!( + "Failed to update balance threshold: {}", + e + ); + } + } + + if should_trigger_nonce_reset(&e) { + tracing::warn!( + nonce = prepared.nonce, + "Nonce too high error detected, may need nonce synchronization" + ); + } + } + Err(e) => { + tracing::error!( + "Failed to move {} back to recycled: {}", + prepared.transaction_id, + e + ); + } + } + } + } } } } @@ -568,7 +1025,7 @@ where return Ok(0); } - // 1. Get pending transactions + // 1. SEQUENTIAL REDIS: Get pending transactions let pending_txs = scoped.peek_pending_transactions(budget).await?; if pending_txs.is_empty() { return Ok(0); @@ -576,7 +1033,7 @@ where let optimistic_nonce = scoped.get_optimistic_nonce().await?; - // 2. Build and sign all transactions in parallel + // 2. PARALLEL BUILD/SIGN: Build and sign all transactions in parallel let build_tasks: Vec<_> = pending_txs .iter() .enumerate() @@ -588,7 +1045,7 @@ where let prepared_results = futures::future::join_all(build_tasks).await; - // 3. Move successful transactions to borrowed state serially (to maintain nonce order) + // 3. SEQUENTIAL REDIS: Move successful transactions to borrowed state (maintain nonce order) let mut prepared_txs = Vec::new(); for (i, result) in prepared_results.into_iter().enumerate() { match result { @@ -596,11 +1053,8 @@ where let borrowed_data = BorrowedTransactionData { transaction_id: prepared.transaction_id.clone(), signed_transaction: prepared.signed_tx.clone(), - hash: *prepared.signed_tx.hash(), - borrowed_at: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(), + hash: prepared.signed_tx.hash().to_string(), + borrowed_at: chrono::Utc::now().timestamp_millis().max(0) as u64, }; match scoped @@ -644,17 +1098,134 @@ where } } - // 4. Send all prepared transactions sequentially with delays - let mut sent_count = 0; - for (i, prepared) in prepared_txs.iter().enumerate() { - if i > 0 { - sleep(Duration::from_millis(50)).await; // 50ms delay between consecutive nonces - } + if prepared_txs.is_empty() { + return Ok(0); + } + + // 4. PARALLEL SEND (but ordered): Send all transactions in parallel but in nonce order + let send_futures: Vec<_> = prepared_txs + .iter() + .enumerate() + .map(|(i, prepared)| async move { + // Add delay for ordering (except first transaction) + if i > 0 { + sleep(Duration::from_millis(50)).await; // 50ms delay between consecutive nonces + } - match Self::send_prepared_transaction(scoped, chain, prepared).await { - Ok(true) => sent_count += 1, - Ok(false) => {} // Failed to send, but handled - Err(e) => tracing::error!("Error sending transaction: {}", e), + let result = chain + .provider() + .send_tx_envelope(prepared.signed_tx.clone().into()) + .await; + (prepared, result) + }) + .collect(); + + let send_results = futures::future::join_all(send_futures).await; + + // 5. 
SEQUENTIAL REDIS: Process results and update states + let mut sent_count = 0; + for (prepared, send_result) in send_results { + match send_result { + Ok(_) => { + // Transaction sent successfully + match scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await + { + Ok(()) => { + sent_count += 1; + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + hash = ?prepared.signed_tx.hash(), + "Successfully sent new transaction" + ); + } + Err(e) => { + tracing::error!( + "Failed to move {} to submitted: {}", + prepared.transaction_id, + e + ); + } + } + } + Err(e) => { + match classify_send_error(&e, SendContext::InitialBroadcast) { + SendErrorClassification::PossiblySent => { + // Move to submitted state + match scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await + { + Ok(()) => { + sent_count += 1; + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + "New transaction possibly sent" + ); + } + Err(e) => { + tracing::error!( + "Failed to move {} to submitted: {}", + prepared.transaction_id, + e + ); + } + } + } + SendErrorClassification::DeterministicFailure => { + // Recycle nonce and requeue transaction + match scoped + .move_borrowed_to_recycled(prepared.nonce, &prepared.transaction_id) + .await + { + Ok(()) => { + tracing::warn!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + error = %e, + "New transaction failed, recycled nonce" + ); + + if should_update_balance_threshold(&e) { + if let Err(e) = + self.update_balance_threshold(scoped, chain).await + { + tracing::error!( + "Failed to update balance threshold: {}", + e + ); + } + } + + if should_trigger_nonce_reset(&e) { + tracing::warn!( + nonce = prepared.nonce, + "Nonce too high error detected, may need nonce synchronization" + ); + } + } + Err(e) => { + tracing::error!( + "Failed to move {} to recycled: {}", + prepared.transaction_id, + e + ); + } + } + } + } + } } } @@ -673,8 +1244,8 @@ where let tx_data = scoped .get_transaction_data(transaction_id) .await? 
- .ok_or_else(|| EoaExecutorWorkerError::StoreError { - message: format!("Transaction not found: {}", transaction_id), + .ok_or_else(|| EoaExecutorWorkerError::TransactionNotFound { + transaction_id: transaction_id.to_string(), })?; // Build and sign transaction @@ -689,206 +1260,376 @@ where }) } - async fn send_prepared_transaction( + async fn send_noop_transaction( + &self, scoped: &ScopedEoaExecutorStore<'_>, chain: &impl Chain, - prepared: &PreparedTransaction, + nonce: u64, ) -> Result { - // Send to RPC - let send_result = chain - .provider() - .send_tx_envelope(prepared.signed_tx.clone().into()) - .await; - - match send_result { - Ok(_) => { - // Transaction was sent successfully - scoped - .move_borrowed_to_submitted( - prepared.nonce, - &format!("{:?}", prepared.signed_tx.hash()), - &prepared.transaction_id, - ) - .await?; - - tracing::info!( - transaction_id = %prepared.transaction_id, - nonce = prepared.nonce, - hash = ?prepared.signed_tx.hash(), - "Successfully sent transaction" - ); + // Create a minimal transaction to consume the recycled nonce + // Send 0 ETH to self with minimal gas + let eoa = scoped.eoa(); - Ok(true) + // Build no-op transaction (send 0 to self) + let mut tx_request = AlloyTransactionRequest::default() + .with_from(eoa) + .with_to(eoa) // Send to self + .with_value(U256::ZERO) // Send 0 value + .with_input(Bytes::new()) // No data + .with_chain_id(scoped.chain_id()) + .with_nonce(nonce) + .with_gas_limit(21000); // Minimal gas for basic transfer + + // Estimate gas to ensure the transaction is valid + match chain.provider().estimate_gas(tx_request.clone()).await { + Ok(gas_limit) => { + tx_request = tx_request.with_gas_limit(gas_limit); } Err(e) => { - match classify_send_error(&e) { - SendResult::PossiblySent => { - // Move to submitted state - scoped - .move_borrowed_to_submitted( - prepared.nonce, - &format!("{:?}", prepared.signed_tx.hash()), - &prepared.transaction_id, - ) - .await?; - - tracing::info!( - transaction_id = %prepared.transaction_id, - nonce = prepared.nonce, - hash = ?prepared.signed_tx.hash(), - "Transaction possibly sent" - ); - - Ok(true) - } - SendResult::DeterministicFailure => { - // Move back to recycled/pending - scoped - .move_borrowed_to_recycled(prepared.nonce, &prepared.transaction_id) - .await?; - - tracing::warn!( - transaction_id = %prepared.transaction_id, - nonce = prepared.nonce, - error = %e, - "Transaction failed deterministically, recycled nonce" - ); - - Ok(false) - } - SendResult::Success => { - // This case is handled by Ok(_) above - unreachable!() - } - } + tracing::warn!( + nonce = nonce, + error = %e, + "Failed to estimate gas for no-op transaction" + ); + return Ok(false); } } + + // Build typed transaction + let typed_tx = match tx_request.build_typed_tx() { + Ok(tx) => tx, + Err(e) => { + tracing::warn!( + nonce = nonce, + error = ?e, + "Failed to build typed transaction for no-op" + ); + return Ok(false); + } + }; + + // Get signing credential from health or use default approach + // For no-op transactions, we need to find a valid signing credential + // This is a limitation of the current design - no-op transactions + // need access to signing credentials which are transaction-specific + tracing::warn!( + nonce = nonce, + "No-op transaction requires signing credential access - recycled nonce will remain unconsumed" + ); + Ok(false) } - async fn send_single_transaction_with_recycled_nonce( + // ========== GAS BUMP METHODS ========== + + /// Attempt to gas bump a stalled transaction for the next expected 
nonce + async fn attempt_gas_bump_for_stalled_nonce( &self, scoped: &ScopedEoaExecutorStore<'_>, chain: &impl Chain, - transaction_id: &str, - nonce: u64, + expected_nonce: u64, ) -> Result { - // Build and sign transaction - match self - .build_and_sign_single_transaction(scoped, transaction_id, nonce, chain) - .await - { - Ok(prepared) => { - let borrowed_data = BorrowedTransactionData { - transaction_id: transaction_id.to_string(), - signed_transaction: prepared.signed_tx.clone(), - hash: *prepared.signed_tx.hash(), - borrowed_at: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(), - }; + tracing::info!( + nonce = expected_nonce, + "Attempting gas bump for stalled nonce" + ); - // Atomically move from pending to borrowed with recycled nonce - match scoped - .atomic_move_pending_to_borrowed_with_recycled_nonce( - transaction_id, - nonce, - &borrowed_data, - ) - .await - { - Ok(()) => { - // Successfully moved to borrowed, now send - Self::send_prepared_transaction(scoped, chain, &prepared).await - } - Err(TransactionStoreError::NonceNotInRecycledSet { .. }) => { - // Nonce was consumed by another worker - Ok(false) - } - Err(TransactionStoreError::TransactionNotInPendingQueue { .. }) => { - // Transaction was processed by another worker - Ok(false) + // Get all transaction IDs for this nonce + let transaction_ids = scoped.get_transaction_ids_for_nonce(expected_nonce).await?; + + if transaction_ids.is_empty() { + tracing::debug!( + nonce = expected_nonce, + "No transactions found for stalled nonce" + ); + return Ok(false); + } + + // Load transaction data for all IDs and find the newest one + let mut newest_transaction: Option<(String, TransactionData)> = None; + let mut newest_submitted_at = 0u64; + + for transaction_id in transaction_ids { + if let Some(tx_data) = scoped.get_transaction_data(&transaction_id).await? { + // Find the most recent attempt for this transaction + if let Some(latest_attempt) = tx_data.attempts.last() { + let submitted_at = latest_attempt.sent_at; + if submitted_at > newest_submitted_at { + newest_submitted_at = submitted_at; + newest_transaction = Some((transaction_id, tx_data)); } - Err(e) => Err(e.into()), } } - Err(EoaExecutorWorkerError::StoreError { .. 
}) => { - // Individual transaction failed (e.g., gas estimation revert) - // Just skip this transaction, don't fail the worker - tracing::warn!( - "Skipping transaction {} due to build failure", - transaction_id - ); - Ok(false) + } + + if let Some((transaction_id, tx_data)) = newest_transaction { + tracing::info!( + transaction_id = %transaction_id, + nonce = expected_nonce, + "Found newest transaction for gas bump" + ); + + // Get the latest attempt to extract gas values from + // Build typed transaction -> manually bump -> sign + let typed_tx = self + .build_typed_transaction(&tx_data, expected_nonce, chain) + .await?; + let bumped_typed_tx = self.apply_gas_bump_to_typed_transaction(typed_tx, 120); // 20% increase + let bumped_tx = self.sign_transaction(bumped_typed_tx, &tx_data).await?; + + // Record the gas bump attempt + scoped + .add_gas_bump_attempt(&transaction_id, bumped_tx.clone()) + .await?; + + // Send the bumped transaction + let tx_envelope = bumped_tx.into(); + match chain.provider().send_tx_envelope(tx_envelope).await { + Ok(_) => { + tracing::info!( + transaction_id = %transaction_id, + nonce = expected_nonce, + "Successfully sent gas bumped transaction" + ); + return Ok(true); + } + Err(e) => { + tracing::warn!( + transaction_id = %transaction_id, + nonce = expected_nonce, + error = %e, + "Failed to send gas bumped transaction" + ); + // Don't fail the worker, just log the error + return Ok(false); + } } - Err(e) => Err(e), // Other errors (RPC, signing) should propagate } + + Ok(false) } - async fn send_noop_transaction( + // ========== HEALTH ACCESSOR ========== + + /// Get EOA health, initializing it if it doesn't exist + /// This method ensures the health data is always available for the worker + async fn get_eoa_health( &self, - _scoped: &ScopedEoaExecutorStore<'_>, - _chain: &impl Chain, - nonce: u64, - ) -> Result { - // TODO: Implement proper no-op transaction for recycled nonces - // This requires handling signing credentials and creating atomic operations - // for consuming recycled nonces without pending transactions - tracing::warn!( - nonce = nonce, - "No-op transaction not implemented - recycled nonce will remain unconsumed" - ); - Ok(false) + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result { + let store_health = scoped.check_eoa_health().await?; + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + match store_health { + Some(health) => Ok(health), + None => { + // Initialize with fresh data from chain + let balance = chain + .provider() + .get_balance(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = e.to_engine_error(chain); + EoaExecutorWorkerError::RpcError { + message: format!( + "Failed to get balance during initialization: {}", + engine_error + ), + inner_error: engine_error, + } + })?; + + let health = EoaHealth { + balance, + balance_threshold: U256::ZERO, + balance_fetched_at: now, + last_confirmation_at: now, + last_nonce_movement_at: now, + nonce_resets: Vec::new(), + }; + + // Save to store + scoped.update_health_data(&health).await?; + Ok(health) + } + } } - // ========== HELPER METHODS ========== - async fn check_and_update_eoa_health( + async fn update_balance_threshold( &self, scoped: &ScopedEoaExecutorStore<'_>, chain: &impl Chain, ) -> Result<(), EoaExecutorWorkerError> { - let current_health = scoped.check_eoa_health().await?; - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(); - - let should_update = current_health - .as_ref() - 
.map(|h| now - h.balance_fetched_at > 300) // 5 minutes - .unwrap_or(true); - - if should_update { - let balance = chain - .provider() - .get_balance(scoped.eoa()) - .await - .map_err(|e| { - let engine_error = e.to_engine_error(chain); - EoaExecutorWorkerError::RpcError { - message: format!("Failed to get balance: {}", engine_error), - } - })?; + let mut health = self.get_eoa_health(scoped, chain).await?; + let balance_threshold = chain + .provider() + .get_balance(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = e.to_engine_error(chain); + EoaExecutorWorkerError::RpcError { + message: format!("Failed to get balance: {}", engine_error), + inner_error: engine_error, + } + })?; + + health.balance_threshold = balance_threshold; + scoped.update_health_data(&health).await?; + Ok(()) + } - let health = EoaHealth { - balance, - balance_fetched_at: now, - last_confirmation_at: current_health.as_ref().and_then(|h| h.last_confirmation_at), - nonce_resets: current_health.map(|h| h.nonce_resets).unwrap_or_default(), - }; + // ========== CONFIRMATION FLOW HELPERS ========== - scoped.update_health_data(&health).await?; + /// Get pending transactions below the given nonce + async fn get_pending_transactions_below_nonce( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + nonce: u64, + ) -> Result, EoaExecutorWorkerError> { + let pending_hashes = scoped.get_hashes_below_nonce(nonce).await?; + + let pending_txs = pending_hashes + .into_iter() + .map(|(nonce, hash, transaction_id)| PendingTransaction { + nonce, + hash, + transaction_id, + }) + .collect(); + + Ok(pending_txs) + } + + /// Fetch receipts for all pending transactions and categorize them + async fn fetch_and_categorize_transactions( + &self, + chain: &impl Chain, + pending_txs: Vec, + ) -> (Vec, Vec) { + // Fetch all receipts in parallel + let receipt_futures: Vec<_> = pending_txs + .iter() + .filter_map(|tx| match tx.hash.parse::() { + Ok(hash_bytes) => Some(async move { + let receipt = chain.provider().get_transaction_receipt(hash_bytes).await; + (tx, receipt) + }), + Err(_) => { + tracing::warn!("Invalid hash format: {}, skipping", tx.hash); + None + } + }) + .collect(); + + let receipt_results = futures::future::join_all(receipt_futures).await; + + // Categorize transactions + let mut confirmed_txs = Vec::new(); + let mut failed_txs = Vec::new(); + + for (tx, receipt_result) in receipt_results { + match receipt_result { + Ok(Some(receipt)) => { + confirmed_txs.push(ConfirmedTransaction { + nonce: tx.nonce, + hash: tx.hash.clone(), + transaction_id: tx.transaction_id.clone(), + receipt, + }); + } + Ok(None) | Err(_) => { + failed_txs.push(FailedTransaction { + hash: tx.hash.clone(), + transaction_id: tx.transaction_id.clone(), + }); + } + } } - Ok(()) + (confirmed_txs, failed_txs) } - async fn build_and_sign_transaction( + // ========== HELPER METHODS ========== + async fn estimate_gas_fees( + &self, + chain: &impl Chain, + tx: AlloyTransactionRequest, + ) -> Result { + // Check what fees are missing and need to be estimated + + // If we have gas_price set, we're doing legacy - don't estimate EIP-1559 + if tx.gas_price.is_some() { + return Ok(tx); + } + + // If we have both EIP-1559 fees set, don't estimate + if tx.max_fee_per_gas.is_some() && tx.max_priority_fee_per_gas.is_some() { + return Ok(tx); + } + + // Try EIP-1559 fees first, fall back to legacy if unsupported + match chain.provider().estimate_eip1559_fees().await { + Ok(eip1559_fees) => { + tracing::debug!( + "Using EIP-1559 fees: max_fee={}, max_priority_fee={}", + 
eip1559_fees.max_fee_per_gas, + eip1559_fees.max_priority_fee_per_gas + ); + + let mut result = tx; + // Only set fees that are missing + if result.max_fee_per_gas.is_none() { + result = result.with_max_fee_per_gas(eip1559_fees.max_fee_per_gas); + } + if result.max_priority_fee_per_gas.is_none() { + result = + result.with_max_priority_fee_per_gas(eip1559_fees.max_priority_fee_per_gas); + } + + Ok(result) + } + Err(eip1559_error) => { + // Check if this is an "unsupported feature" error + if let RpcError::UnsupportedFeature(_) = &eip1559_error { + tracing::debug!("EIP-1559 not supported, falling back to legacy gas price"); + + // Fall back to legacy gas price only if no gas price is set + if tx.authorization_list().is_none() { + match chain.provider().get_gas_price().await { + Ok(gas_price) => { + tracing::debug!("Using legacy gas price: {}", gas_price); + Ok(tx.with_gas_price(gas_price)) + } + Err(legacy_error) => Err(EoaExecutorWorkerError::RpcError { + message: format!( + "Failed to get legacy gas price: {}", + legacy_error + ), + inner_error: legacy_error.to_engine_error(chain), + }), + } + } else { + Err(EoaExecutorWorkerError::TransactionBuildFailed { + message: "EIP7702 transactions not supported on chain".to_string(), + }) + } + } else { + // Other EIP-1559 error + Err(EoaExecutorWorkerError::RpcError { + message: format!("Failed to estimate EIP-1559 fees: {}", eip1559_error), + inner_error: eip1559_error.to_engine_error(chain), + }) + } + } + } + } + + async fn build_typed_transaction( &self, tx_data: &TransactionData, nonce: u64, chain: &impl Chain, - ) -> Result, EoaExecutorWorkerError> { + ) -> Result { // Build transaction request from stored data let mut tx_request = AlloyTransactionRequest::default() .with_from(tx_data.user_request.from) @@ -905,9 +1646,10 @@ where tx_request = tx_request.with_gas_limit(gas_limit); } - // Apply transaction type specific settings - if let Some(type_data) = &tx_data.user_request.transaction_type_data { - tx_request = match type_data { + // Handle gas fees - either from user settings or estimation + tx_request = if let Some(type_data) = &tx_data.user_request.transaction_type_data { + // User provided gas settings - respect them first + match type_data { crate::eoa::store::EoaTransactionTypeData::Eip1559(data) => { let mut req = tx_request; if let Some(max_fee) = data.max_fee_per_gas { @@ -916,13 +1658,20 @@ where if let Some(max_priority) = data.max_priority_fee_per_gas { req = req.with_max_priority_fee_per_gas(max_priority); } + + // if either not set, estimate the other one + if req.max_fee_per_gas.is_none() || req.max_priority_fee_per_gas.is_none() { + req = self.estimate_gas_fees(chain, req).await?; + } + req } crate::eoa::store::EoaTransactionTypeData::Legacy(data) => { if let Some(gas_price) = data.gas_price { tx_request.with_gas_price(gas_price) } else { - tx_request + // User didn't provide gas price, estimate it + self.estimate_gas_fees(chain, tx_request).await? } } crate::eoa::store::EoaTransactionTypeData::Eip7702(data) => { @@ -936,10 +1685,19 @@ where if let Some(max_priority) = data.max_priority_fee_per_gas { req = req.with_max_priority_fee_per_gas(max_priority); } + + // if either not set, estimate the other one + if req.max_fee_per_gas.is_none() || req.max_priority_fee_per_gas.is_none() { + req = self.estimate_gas_fees(chain, req).await?; + } + req } - }; - } + } + } else { + // No user settings - estimate appropriate fees + self.estimate_gas_fees(chain, tx_request).await? 
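+            // (Net effect of the branches above, in brief: user-supplied gas
+            // settings are respected as-is, any missing EIP-1559 field is
+            // estimated, and when nothing is supplied estimate_gas_fees picks
+            // the fee model, preferring EIP-1559 and falling back to a legacy
+            // gas price when EIP-1559 is unsupported, except for EIP-7702
+            // requests, which require EIP-1559.)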
+ }; // Estimate gas if needed if tx_request.gas.is_none() { @@ -953,12 +1711,13 @@ where if let Some(revert_data) = error_payload.as_revert_data() { // This is a revert - the transaction is fundamentally broken // This should fail the individual transaction, not the worker - return Err(EoaExecutorWorkerError::StoreError { + return Err(EoaExecutorWorkerError::TransactionSimulationFailed { message: format!( "Transaction reverted during gas estimation: {} (revert: {})", error_payload.message, hex::encode(&revert_data) ), + inner_error: e.to_engine_error(chain), }); } } @@ -967,20 +1726,25 @@ where let engine_error = e.to_engine_error(chain); return Err(EoaExecutorWorkerError::RpcError { message: format!("Gas estimation failed: {}", engine_error), + inner_error: engine_error, }); } } } // Build typed transaction - let typed_tx = - tx_request - .build_typed_tx() - .map_err(|e| EoaExecutorWorkerError::StoreError { - message: format!("Failed to build typed transaction: {:?}", e), - })?; + tx_request + .build_typed_tx() + .map_err(|e| EoaExecutorWorkerError::TransactionBuildFailed { + message: format!("Failed to build typed transaction: {:?}", e), + }) + } - // Sign transaction + async fn sign_transaction( + &self, + typed_tx: TypedTransaction, + tx_data: &TransactionData, + ) -> Result, EoaExecutorWorkerError> { let signing_options = EoaSigningOptions { from: tx_data.user_request.from, chain_id: Some(tx_data.user_request.chain_id), @@ -996,14 +1760,63 @@ where .await .map_err(|engine_error| EoaExecutorWorkerError::SigningError { message: format!("Failed to sign transaction: {}", engine_error), + inner_error: engine_error, })?; - let signed_tx = typed_tx.into_signed(signature.parse().map_err(|e| { - EoaExecutorWorkerError::StoreError { + let signature = signature.parse::().map_err(|e| { + EoaExecutorWorkerError::SignatureParsingFailed { message: format!("Failed to parse signature: {}", e), } - })?); + })?; + + Ok(typed_tx.into_signed(signature)) + } - Ok(signed_tx) + async fn build_and_sign_transaction( + &self, + tx_data: &TransactionData, + nonce: u64, + chain: &impl Chain, + ) -> Result, EoaExecutorWorkerError> { + let typed_tx = self.build_typed_transaction(tx_data, nonce, chain).await?; + self.sign_transaction(typed_tx, tx_data).await + } + + fn apply_gas_bump_to_typed_transaction( + &self, + mut typed_tx: TypedTransaction, + bump_multiplier: u32, // e.g., 120 for 20% increase + ) -> TypedTransaction { + match &mut typed_tx { + TypedTransaction::Eip1559(tx) => { + tx.max_fee_per_gas = tx.max_fee_per_gas * bump_multiplier as u128 / 100; + tx.max_priority_fee_per_gas = + tx.max_priority_fee_per_gas * bump_multiplier as u128 / 100; + } + TypedTransaction::Legacy(tx) => { + tx.gas_price = tx.gas_price * bump_multiplier as u128 / 100; + } + TypedTransaction::Eip2930(tx) => { + tx.gas_price = tx.gas_price * bump_multiplier as u128 / 100; + } + TypedTransaction::Eip7702(tx) => { + tx.max_fee_per_gas = tx.max_fee_per_gas * bump_multiplier as u128 / 100; + tx.max_priority_fee_per_gas = + tx.max_priority_fee_per_gas * bump_multiplier as u128 / 100; + } + TypedTransaction::Eip4844(tx) => match tx { + TxEip4844Variant::TxEip4844(tx) => { + tx.max_fee_per_gas = tx.max_fee_per_gas * bump_multiplier as u128 / 100; + tx.max_priority_fee_per_gas = + tx.max_priority_fee_per_gas * bump_multiplier as u128 / 100; + } + TxEip4844Variant::TxEip4844WithSidecar(TxEip4844WithSidecar { tx, .. 
}) => { + tx.max_fee_per_gas = tx.max_fee_per_gas * bump_multiplier as u128 / 100; + tx.max_priority_fee_per_gas = + tx.max_priority_fee_per_gas * bump_multiplier as u128 / 100; + } + }, + } + typed_tx } } diff --git a/server/Cargo.toml b/server/Cargo.toml index d96d04e..93c0ab0 100644 --- a/server/Cargo.toml +++ b/server/Cargo.toml @@ -4,13 +4,13 @@ version = "0.1.0" edition = "2024" [dependencies] -alloy = { version = "1.0.8", features = ["serde"] } +alloy = { workspace = true, features = ["serde"] } axum = { version = "0.8.4", features = ["macros"] } config = "0.15.11" serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" -vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } -vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } +vault-sdk = { workspace = true } +vault-types = { workspace = true } engine-core = { path = "../core" } engine-aa-core = { path = "../aa-core" } engine-executors = { path = "../executors" } diff --git a/thirdweb-core/Cargo.toml b/thirdweb-core/Cargo.toml index d38fb0d..cb12a75 100644 --- a/thirdweb-core/Cargo.toml +++ b/thirdweb-core/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2024" [dependencies] -alloy = { version = "1.0.9", features = [ +alloy = { workspace = true, features = [ "json-abi", "consensus", "dyn-abi", diff --git a/twmq/src/lib.rs b/twmq/src/lib.rs index 8e7b7d9..f9039c4 100644 --- a/twmq/src/lib.rs +++ b/twmq/src/lib.rs @@ -10,12 +10,12 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use error::TwmqError; use hooks::TransactionContext; +pub use job::BorrowedJob; use job::{ DelayOptions, Job, JobError, JobErrorRecord, JobErrorType, JobOptions, JobResult, JobStatus, PushableJob, RequeuePosition, }; -pub use job::BorrowedJob; -pub use multilane::{MultilaneQueue, MultilanePushableJob}; +pub use multilane::{MultilanePushableJob, MultilaneQueue}; use queue::QueueOptions; use redis::Pipeline; use redis::{AsyncCommands, RedisResult, aio::ConnectionManager}; @@ -74,7 +74,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static { _job: &BorrowedJob, _d: SuccessHookData, _tx: &mut TransactionContext<'_>, - ) -> impl Future + Send + Sync { + ) -> impl Future + Send { std::future::ready(()) } @@ -83,7 +83,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static { _job: &BorrowedJob, _d: NackHookData, _tx: &mut TransactionContext<'_>, - ) -> impl Future + Send + Sync { + ) -> impl Future + Send { std::future::ready(()) } @@ -92,7 +92,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static { _job: &BorrowedJob, _d: FailHookData, _tx: &mut TransactionContext<'_>, - ) -> impl Future + Send + Sync { + ) -> impl Future + Send { std::future::ready(()) } @@ -110,7 +110,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static { _job: &Job>, _d: QueueInternalErrorHookData<'_>, _tx: &mut TransactionContext<'_>, - ) -> impl Future + Send + Sync { + ) -> impl Future + Send { std::future::ready(()) } } @@ -442,10 +442,12 @@ impl Queue { ); } Ok(CancelResult::CancelledImmediately) - }, + } "cancellation_pending" => Ok(CancelResult::CancellationPending), "not_found" => Ok(CancelResult::NotFound), - _ => Err(TwmqError::Runtime { message: format!("Unexpected cancel result: {}", result) }), + _ => Err(TwmqError::Runtime { + message: format!("Unexpected cancel result: {}", result), + }), } } @@ -537,8 +539,8 @@ impl Queue { .await .into_iter() .collect::, _>>() - .map_err(|e| { - TwmqError::Runtime { message: 
format!("Failed to acquire permits during shutdown: {}", e) } + .map_err(|e| TwmqError::Runtime { + message: format!("Failed to acquire permits during shutdown: {}", e), })?; tracing::info!( @@ -720,7 +722,11 @@ impl Queue { .unwrap() .as_secs(); - let results_from_lua: (Vec<(String, String, String, String, String, String)>, Vec, Vec) = script + let results_from_lua: ( + Vec<(String, String, String, String, String, String)>, + Vec, + Vec, + ) = script .key(self.name()) .key(self.delayed_zset_name()) .key(self.pending_list_name()) @@ -736,7 +742,7 @@ impl Queue { .await?; let (job_results, cancelled_jobs, timed_out_jobs) = results_from_lua; - + // Log individual lease timeouts and cancellations for job_id in &timed_out_jobs { tracing::warn!(job_id = %job_id, "Job lease expired, moved back to pending"); @@ -746,8 +752,14 @@ impl Queue { } let mut jobs = Vec::new(); - for (job_id_str, job_data_t_json, attempts_str, created_at_str, processed_at_str, lease_token) in - job_results + for ( + job_id_str, + job_data_t_json, + attempts_str, + created_at_str, + processed_at_str, + lease_token, + ) in job_results { match serde_json::from_str::(&job_data_t_json) { Ok(data_t) => { @@ -794,7 +806,7 @@ impl Queue { }; let twmq_error: TwmqError = e.into(); - + // Complete job using queue error method with lease token if let Err(e) = queue_clone .complete_job_queue_error(&job, &lease_token, &twmq_error.into()) @@ -835,37 +847,37 @@ impl Queue { Some(job) => { // Create cancellation error using the trait let cancellation_error = H::ErrorData::user_cancelled(); - - // Create transaction pipeline for atomicity + + // Create transaction pipeline for atomicity let mut pipeline = redis::pipe(); pipeline.atomic(); - + // Create transaction context with mutable access to pipeline - let mut tx_context = TransactionContext::new( - &mut pipeline, - self.name().to_string(), - ); - + let mut tx_context = + TransactionContext::new(&mut pipeline, self.name().to_string()); + let fail_hook_data = FailHookData { error: &cancellation_error, }; - + // Create a BorrowedJob with a dummy lease token since cancelled jobs don't have active leases let borrowed_job = BorrowedJob::new(job, "cancelled".to_string()); - + // Call fail hook for user cancellation - self.handler.on_fail(&borrowed_job, fail_hook_data, &mut tx_context).await; - + self.handler + .on_fail(&borrowed_job, fail_hook_data, &mut tx_context) + .await; + // Execute the pipeline (just hook commands, job already moved to failed) pipeline.query_async::<()>(&mut self.redis.clone()).await?; - + tracing::info!( job_id = %job_id, "Successfully processed job cancellation hooks" ); - + Ok(()) - }, + } None => { tracing::warn!( job_id = %job_id, @@ -892,7 +904,7 @@ impl Queue { // Delete the lease key to consume it pipeline.del(&lease_key); - // Add job completion operations + // Add job completion operations pipeline .hdel(self.active_hash_name(), &job.job.id) .lpush(self.success_list_name(), &job.job.id) @@ -1111,21 +1123,31 @@ impl Queue { match &result { Ok(output) => { let success_hook_data = SuccessHookData { result: output }; - self.handler.on_success(job, success_hook_data, &mut tx_context).await; + self.handler + .on_success(job, success_hook_data, &mut tx_context) + .await; self.add_success_operations(job, output, &mut hook_pipeline)?; } - Err(JobError::Nack { error, delay, position }) => { + Err(JobError::Nack { + error, + delay, + position, + }) => { let nack_hook_data = NackHookData { error, delay: *delay, position: *position, }; - self.handler.on_nack(job, 
nack_hook_data, &mut tx_context).await; + self.handler + .on_nack(job, nack_hook_data, &mut tx_context) + .await; self.add_nack_operations(job, error, *delay, *position, &mut hook_pipeline)?; } Err(JobError::Fail(error)) => { let fail_hook_data = FailHookData { error }; - self.handler.on_fail(job, fail_hook_data, &mut tx_context).await; + self.handler + .on_fail(job, fail_hook_data, &mut tx_context) + .await; self.add_fail_operations(job, error, &mut hook_pipeline)?; } } @@ -1145,9 +1167,7 @@ impl Queue { // Check if lease exists - if not, job was cancelled or timed out let lease_exists: bool = conn.exists(&lease_key).await?; if !lease_exists { - redis::cmd("UNWATCH") - .query_async::<()>(&mut conn) - .await?; + redis::cmd("UNWATCH").query_async::<()>(&mut conn).await?; tracing::warn!(job_id = %job.job.id, "Lease no longer exists, job was cancelled or timed out"); return Ok(()); } @@ -1157,7 +1177,10 @@ impl Queue { atomic_pipeline.atomic(); // Execute atomically with WATCH/MULTI/EXEC - match atomic_pipeline.query_async::>(&mut conn).await { + match atomic_pipeline + .query_async::>(&mut conn) + .await + { Ok(_) => { // Success! Now run post-completion methods match &result { @@ -1190,9 +1213,13 @@ impl Queue { let mut hook_pipeline = redis::pipe(); let mut tx_context = TransactionContext::new(&mut hook_pipeline, self.name().to_string()); - let twmq_error = TwmqError::Runtime { message: "Job processing failed with user error".to_string() }; + let twmq_error = TwmqError::Runtime { + message: "Job processing failed with user error".to_string(), + }; let queue_error_hook_data = QueueInternalErrorHookData { error: &twmq_error }; - self.handler.on_queue_error(job, queue_error_hook_data, &mut tx_context).await; + self.handler + .on_queue_error(job, queue_error_hook_data, &mut tx_context) + .await; // Add fail operations to pipeline let now = SystemTime::now() @@ -1235,9 +1262,7 @@ impl Queue { // Check if lease exists - if not, job was cancelled or timed out let lease_exists: bool = conn.exists(&lease_key).await?; if !lease_exists { - redis::cmd("UNWATCH") - .query_async::<()>(&mut conn) - .await?; + redis::cmd("UNWATCH").query_async::<()>(&mut conn).await?; tracing::warn!(job_id = %job.id, "Lease no longer exists, job was cancelled or timed out"); return Ok(()); } @@ -1247,7 +1272,10 @@ impl Queue { atomic_pipeline.atomic(); // Execute atomically with WATCH/MULTI/EXEC - match atomic_pipeline.query_async::>(&mut conn).await { + match atomic_pipeline + .query_async::>(&mut conn) + .await + { Ok(_) => { // Success! 
Run post-completion self.post_fail_completion().await?; From 426ff328b447f6ae4dcf111d89a858dada076e75 Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Mon, 7 Jul 2025 06:07:41 +0530 Subject: [PATCH 8/8] wire up executor to API --- core/src/chain.rs | 122 +++++++----- core/src/error.rs | 12 +- core/src/execution_options/eoa.rs | 186 +++++++++++++++++++ core/src/execution_options/mod.rs | 8 + executors/src/eoa/mod.rs | 2 + executors/src/eoa/store.rs | 65 ++++--- executors/src/eoa/worker.rs | 146 +++++++++++++-- server/configuration/server_base.yaml | 1 + server/src/config.rs | 1 + server/src/execution_router/mod.rs | 110 ++++++++++- server/src/main.rs | 12 +- server/src/queue/manager.rs | 61 +++++- twmq/benches/throughput.rs | 3 +- twmq/src/lib.rs | 32 ++++ twmq/src/multilane.rs | 15 ++ twmq/src/queue.rs | 22 +++ twmq/tests/idempotency_modes.rs | 255 ++++++++++++++++++++++++++ twmq/tests/lease_expiry.rs | 9 +- 18 files changed, 961 insertions(+), 101 deletions(-) create mode 100644 core/src/execution_options/eoa.rs create mode 100644 twmq/tests/idempotency_modes.rs diff --git a/core/src/chain.rs b/core/src/chain.rs index a38bb10..bba79f6 100644 --- a/core/src/chain.rs +++ b/core/src/chain.rs @@ -126,48 +126,73 @@ impl Chain for ThirdwebChain { impl ThirdwebChainConfig<'_> { pub fn to_chain(&self) -> Result { - let rpc_url = Url::parse(&format!( - "https://{chain_id}.{base_url}/{client_id}", - chain_id = self.chain_id, - base_url = self.rpc_base_url, - client_id = self.client_id, - )) - .map_err(|e| EngineError::RpcConfigError { - message: format!("Failed to parse RPC URL: {}", e), - })?; - - let bundler_url = Url::parse(&format!( - "https://{chain_id}.{base_url}/v2", - chain_id = self.chain_id, - base_url = self.bundler_base_url, - )) - .map_err(|e| EngineError::RpcConfigError { - message: format!("Failed to parse Bundler URL: {}", e), - })?; - - let paymaster_url = Url::parse(&format!( - "https://{chain_id}.{base_url}/v2", - chain_id = self.chain_id, - base_url = self.paymaster_base_url, - )) - .map_err(|e| EngineError::RpcConfigError { - message: format!("Failed to parse Paymaster URL: {}", e), - })?; + // Special handling for chain ID 31337 (local anvil) + let (rpc_url, bundler_url, paymaster_url) = if self.chain_id == 31337 { + // For local anvil, use localhost URLs + let local_rpc_url = "http://127.0.0.1:8545"; + let rpc_url = Url::parse(local_rpc_url).map_err(|e| EngineError::RpcConfigError { + message: format!("Failed to parse local anvil RPC URL: {}", e), + })?; + + // For bundler and paymaster, use the same local RPC URL + // since anvil doesn't have separate bundler/paymaster services + let bundler_url = rpc_url.clone(); + let paymaster_url = rpc_url.clone(); + + (rpc_url, bundler_url, paymaster_url) + } else { + // Standard URL construction for other chains + let rpc_url = Url::parse(&format!( + "https://{chain_id}.{base_url}/{client_id}", + chain_id = self.chain_id, + base_url = self.rpc_base_url, + client_id = self.client_id, + )) + .map_err(|e| EngineError::RpcConfigError { + message: format!("Failed to parse RPC URL: {}", e), + })?; + + let bundler_url = Url::parse(&format!( + "https://{chain_id}.{base_url}/v2", + chain_id = self.chain_id, + base_url = self.bundler_base_url, + )) + .map_err(|e| EngineError::RpcConfigError { + message: format!("Failed to parse Bundler URL: {}", e), + })?; + + let paymaster_url = Url::parse(&format!( + "https://{chain_id}.{base_url}/v2", + chain_id = self.chain_id, + base_url = self.paymaster_base_url, + )) + .map_err(|e| 
EngineError::RpcConfigError { + message: format!("Failed to parse Paymaster URL: {}", e), + })?; + + (rpc_url, bundler_url, paymaster_url) + }; let mut sensitive_headers = HeaderMap::new(); - sensitive_headers.insert( - "x-client-id", - HeaderValue::from_str(self.client_id).map_err(|e| EngineError::RpcConfigError { - message: format!("Unserialisable client-id used: {e}"), - })?, - ); - - sensitive_headers.insert( - "x-secret-key", - HeaderValue::from_str(self.secret_key).map_err(|e| EngineError::RpcConfigError { - message: format!("Unserialisable secret-key used: {e}"), - })?, - ); + + // Only add auth headers for non-local chains + if self.chain_id != 31337 { + sensitive_headers.insert( + "x-client-id", + HeaderValue::from_str(self.client_id).map_err(|e| EngineError::RpcConfigError { + message: format!("Unserialisable client-id used: {e}"), + })?, + ); + + sensitive_headers.insert( + "x-secret-key", + HeaderValue::from_str(self.secret_key).map_err(|e| { + EngineError::RpcConfigError { + message: format!("Unserialisable secret-key used: {e}"), + } + })?, + ); + } let reqwest_client = HttpClientBuilder::new() @@ -181,10 +206,19 @@ impl ThirdwebChainConfig<'_> { let paymaster_transport = transport_builder.default_transport(paymaster_url.clone()); let bundler_transport = transport_builder.default_transport(bundler_url.clone()); - let sensitive_bundler_transport = - transport_builder.with_headers(bundler_url.clone(), sensitive_headers.clone()); - let sensitive_paymaster_transport = - transport_builder.with_headers(paymaster_url.clone(), sensitive_headers); + let sensitive_bundler_transport = if self.chain_id == 31337 { + // For local anvil, use the same transport as non-sensitive + transport_builder.default_transport(bundler_url.clone()) + } else { + transport_builder.with_headers(bundler_url.clone(), sensitive_headers.clone()) + }; + + let sensitive_paymaster_transport = if self.chain_id == 31337 { + // For local anvil, use the same transport as non-sensitive + transport_builder.default_transport(paymaster_url.clone()) + } else { + transport_builder.with_headers(paymaster_url.clone(), sensitive_headers) + }; let paymaster_rpc_client = RpcClient::builder().transport(paymaster_transport, false); let bundler_rpc_client = RpcClient::builder().transport(bundler_transport, false); diff --git a/core/src/error.rs b/core/src/error.rs index 572f60c..d3ef448 100644 --- a/core/src/error.rs +++ b/core/src/error.rs @@ -56,8 +56,8 @@ pub enum RpcErrorKind { #[error("HTTP error {status}")] TransportHttpError { status: u16, body: String }, - #[error("Other transport error: {0}")] - OtherTransportError(String), + #[error("Other transport error: {message}")] + OtherTransportError { message: String }, } #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema, utoipa::ToSchema)] @@ -345,8 +345,12 @@ fn to_engine_rpc_error_kind(err: &AlloyRpcError) -> RpcError status: err.status, body: err.body.to_string(), }, - TransportErrorKind::Custom(err) => RpcErrorKind::OtherTransportError(err.to_string()), - _ => RpcErrorKind::OtherTransportError(err.to_string()), + TransportErrorKind::Custom(err) => RpcErrorKind::OtherTransportError { + message: err.to_string(), + }, + _ => RpcErrorKind::OtherTransportError { + message: err.to_string(), + }, }, } } diff --git a/core/src/execution_options/eoa.rs b/core/src/execution_options/eoa.rs new file mode 100644 index 0000000..3699ccc --- /dev/null +++ b/core/src/execution_options/eoa.rs @@ -0,0 +1,186 @@ +use crate::defs::AddressDef; +use alloy::eips::eip7702::SignedAuthorization; 
+use alloy::primitives::{Address, U256}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// ### EOA Execution Options +/// This struct configures EOA (Externally Owned Account) direct execution. +/// +/// EOA execution sends transactions directly from an EOA address without +/// smart contract abstraction. This is the most basic form of transaction +/// execution and is suitable for simple transfers and contract interactions. +/// +/// ### Use Cases +/// - Direct ETH transfers +/// - Simple contract interactions +/// - Gas-efficient transactions +/// - When smart account features are not needed +/// +/// ### Features +/// - Direct transaction execution from EOA +/// - Automatic nonce management +/// - Gas price optimization +/// - Transaction confirmation tracking +/// - Retry and recovery mechanisms +/// - Support for EIP-1559, EIP-2930, and Legacy transactions +/// - Support for EIP-7702 delegated transactions +#[derive(Deserialize, Serialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaExecutionOptions { + /// The EOA address to send transactions from + /// This account must have sufficient balance to pay for gas and transaction value + #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] + pub from: Address, + + /// The gas limit to use for the transaction + /// If not provided, the system will auto-detect the best gas limit + #[schemars(with = "Option")] + #[schema(value_type = Option)] + pub gas_limit: Option, + + // /// Maximum number of in-flight transactions for this EOA + // /// Controls how many transactions can be pending confirmation at once + // /// Defaults to 100 if not specified + // #[serde(default = "default_max_inflight")] + // pub max_inflight: u64, + + // /// Maximum number of recycled nonces to keep + // /// When transactions fail, their nonces are recycled for reuse + // /// Defaults to 50 if not specified + // #[serde(default = "default_max_recycled_nonces")] + // pub max_recycled_nonces: u64, + /// Transaction type-specific data for gas configuration + /// If not provided, the system will auto-detect the best transaction type + #[serde(flatten)] + pub transaction_type_data: Option, +} + +/// EOA Transaction type-specific data for different EIP standards +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(untagged)] +pub enum EoaTransactionTypeData { + /// EIP-7702 transaction with authorization list and EIP-1559 gas pricing + Eip7702(EoaSend7702JobData), + /// EIP-1559 transaction with priority fee and max fee per gas + Eip1559(EoaSend1559JobData), + /// Legacy transaction with simple gas price + Legacy(EoaSendLegacyJobData), +} + +/// EIP-7702 transaction configuration +/// Allows delegation of EOA to smart contract logic temporarily +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaSend7702JobData { + /// List of signed authorizations for contract delegation + /// Each authorization allows the EOA to temporarily delegate to a smart contract + #[schemars(with = "Option>")] + #[schema(value_type = Option>)] + pub authorization_list: Option>, + + /// Maximum fee per gas willing to pay (in wei) + /// This is the total fee cap including base fee and priority fee + pub max_fee_per_gas: Option, + + /// Maximum priority fee per gas willing to pay (in wei) + /// This is the tip paid to validators for transaction inclusion + pub max_priority_fee_per_gas: Option, +} + 
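+// Illustrative note (not part of the API surface): because `transaction_type_data`
+// is `#[serde(flatten)]` and `EoaTransactionTypeData` is `#[serde(untagged)]`,
+// the gas fields sit at the top level of the request body. An EIP-1559 style
+// request would look roughly like:
+//
+//   { "from": "0x...", "maxFeePerGas": 30000000000, "maxPriorityFeePerGas": 1000000000 }
+//
+// Keep in mind that untagged enums are tried in declaration order, so a body
+// carrying only fee fields matches `Eip7702` (all of whose fields are optional)
+// before `Eip1559` is ever attempted.
+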
+/// EIP-1559 transaction configuration +/// Uses base fee + priority fee model for more predictable gas pricing +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaSend1559JobData { + /// Maximum fee per gas willing to pay (in wei) + /// This is the total fee cap including base fee and priority fee + pub max_fee_per_gas: Option, + + /// Maximum priority fee per gas willing to pay (in wei) + /// This is the tip paid to validators for transaction inclusion + pub max_priority_fee_per_gas: Option, +} + +/// Legacy transaction configuration +/// Uses simple gas price model (pre-EIP-1559) +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaSendLegacyJobData { + /// Gas price willing to pay (in wei) + /// This is the total price per unit of gas for legacy transactions + pub gas_price: Option, +} + +/// EIP-7702 Authorization structure for OpenAPI schema +/// Represents an unsigned authorization that allows an EOA to delegate to a smart contract +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct AuthorizationSchema { + /// The chain ID of the authorization + /// Must match the chain where the transaction will be executed + #[schemars(with = "String")] + #[schema(value_type = String, example = "1")] + pub chain_id: U256, + + /// The smart contract address to delegate to + /// This contract will be able to execute logic on behalf of the EOA + #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] + pub address: Address, + + /// The nonce for the authorization + /// Must be the current nonce of the authorizing account + #[schema(example = 42)] + pub nonce: u64, +} + +/// EIP-7702 Signed Authorization structure for OpenAPI schema +/// Contains an authorization plus the cryptographic signature +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SignedAuthorizationSchema { + /// The chain ID of the authorization + /// Must match the chain where the transaction will be executed + #[schemars(with = "String")] + #[schema(value_type = String, example = "1")] + pub chain_id: U256, + + /// The smart contract address to delegate to + /// This contract will be able to execute logic on behalf of the EOA + #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] + pub address: Address, + + /// The nonce for the authorization + /// Must be the current nonce of the authorizing account + #[schema(example = 42)] + pub nonce: u64, + + /// Signature parity value (0 or 1) + /// Used for ECDSA signature recovery + #[serde(rename = "yParity", alias = "v")] + #[schema(example = 0)] + pub y_parity: u8, + + /// Signature r value + /// First component of the ECDSA signature + #[schemars(with = "String")] + #[schema(value_type = String, example = "0x1234567890abcdef...")] + pub r: U256, + + /// Signature s value + /// Second component of the ECDSA signature + #[schemars(with = "String")] + #[schema(value_type = String, example = "0xfedcba0987654321...")] + pub s: U256, +} + +fn default_max_inflight() -> u64 { + 100 +} + +fn default_max_recycled_nonces() -> u64 { + 50 +} diff --git a/core/src/execution_options/mod.rs b/core/src/execution_options/mod.rs index 186063e..5a56087 100644 --- a/core/src/execution_options/mod.rs +++ b/core/src/execution_options/mod.rs @@ -7,6 +7,7 @@ use 
std::collections::HashMap; use crate::transaction::InnerTransaction; pub mod aa; pub mod auto; +pub mod eoa; // Base execution options for all transactions // All specific execution options share this @@ -35,6 +36,10 @@ pub enum SpecificExecutionOptions { #[schema(title = "ERC-4337 Execution Options")] ERC4337(aa::Erc4337ExecutionOptions), + + #[serde(rename = "eoa")] + #[schema(title = "EOA Execution Options")] + EOA(eoa::EoaExecutionOptions), } fn deserialize_with_default_auto<'de, D>( @@ -118,6 +123,8 @@ pub struct QueuedTransactionsResponse { pub enum ExecutorType { #[serde(rename = "ERC4337")] Erc4337, + #[serde(rename = "EOA")] + Eoa, } impl ExecutionOptions { @@ -125,6 +132,7 @@ impl ExecutionOptions { match &self.specific { SpecificExecutionOptions::ERC4337(_) => ExecutorType::Erc4337, SpecificExecutionOptions::Auto(_) => ExecutorType::Erc4337, + SpecificExecutionOptions::EOA(_) => ExecutorType::Eoa, } } diff --git a/executors/src/eoa/mod.rs b/executors/src/eoa/mod.rs index 0c7c17f..c7186f7 100644 --- a/executors/src/eoa/mod.rs +++ b/executors/src/eoa/mod.rs @@ -2,3 +2,5 @@ pub mod error_classifier; pub mod store; pub mod worker; pub use error_classifier::{EoaErrorMapper, EoaExecutionError, RecoveryStrategy}; +pub use store::{EoaExecutorStore, EoaTransactionRequest}; +pub use worker::{EoaExecutorWorker, EoaExecutorWorkerJobData}; diff --git a/executors/src/eoa/store.rs b/executors/src/eoa/store.rs index 90fdbe6..d3206bf 100644 --- a/executors/src/eoa/store.rs +++ b/executors/src/eoa/store.rs @@ -1,11 +1,11 @@ use alloy::consensus::{Signed, Transaction, TypedTransaction}; -use alloy::eips::eip7702::SignedAuthorization; use alloy::network::AnyTransactionReceipt; use alloy::primitives::{Address, B256, Bytes, U256}; use chrono; use engine_core::chain::RpcCredentials; use engine_core::credentials::SigningCredential; use engine_core::execution_options::WebhookOptions; +use engine_core::execution_options::eoa::EoaTransactionTypeData; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::future::Future; @@ -248,34 +248,6 @@ pub struct EoaTransactionRequest { pub transaction_type_data: Option, } -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(untagged)] -pub enum EoaTransactionTypeData { - Eip7702(EoaSend7702JobData), - Eip1559(EoaSend1559JobData), - Legacy(EoaSendLegacyJobData), -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaSend7702JobData { - pub authorization_list: Option>, - pub max_fee_per_gas: Option, - pub max_priority_fee_per_gas: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaSend1559JobData { - pub max_fee_per_gas: Option, - pub max_priority_fee_per_gas: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct EoaSendLegacyJobData { - pub gas_price: Option, -} /// Active attempt for a transaction (full alloy transaction + metadata) #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TransactionAttempt { @@ -1706,6 +1678,41 @@ impl EoaExecutorStore { }) .await } + + /// Add a transaction to the pending queue and store its data + /// This is called when a new transaction request comes in for an EOA + pub async fn add_transaction( + &self, + transaction_request: EoaTransactionRequest, + ) -> Result<(), TransactionStoreError> { + let transaction_id = &transaction_request.transaction_id; + let eoa = transaction_request.from; + let chain_id = transaction_request.chain_id; 
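+        // The Redis writes below go out in a single pipelined round trip; note
+        // that without an `.atomic()` call the pipeline is not wrapped in
+        // MULTI/EXEC, so the data-hash writes and the pending-list push are
+        // batched for efficiency rather than being strictly atomic.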
+ + let tx_data_key = self.transaction_data_key_name(transaction_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + + // Store transaction data as JSON in the user_request field of the hash + let user_request_json = serde_json::to_string(&transaction_request)?; + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + let mut conn = self.redis.clone(); + + // Use a pipeline to atomically store data and add to pending queue + let mut pipeline = twmq::redis::pipe(); + + // Store transaction data + pipeline.hset(&tx_data_key, "user_request", &user_request_json); + pipeline.hset(&tx_data_key, "status", "pending"); + pipeline.hset(&tx_data_key, "created_at", now); + + // Add to pending queue + pipeline.lpush(&pending_key, transaction_id); + + pipeline.query_async::<()>(&mut conn).await?; + + Ok(()) + } } // Additional error types diff --git a/executors/src/eoa/worker.rs b/executors/src/eoa/worker.rs index 9adc9d4..fba7601 100644 --- a/executors/src/eoa/worker.rs +++ b/executors/src/eoa/worker.rs @@ -10,6 +10,7 @@ use alloy::rpc::types::TransactionRequest as AlloyTransactionRequest; use alloy::signers::Signature; use alloy::transports::{RpcError, TransportErrorKind}; use engine_core::error::EngineError; +use engine_core::execution_options::eoa::EoaTransactionTypeData; use engine_core::signer::AccountSigner; use engine_core::{ chain::{Chain, ChainService, RpcCredentials}, @@ -198,11 +199,25 @@ fn should_trigger_nonce_reset(error: &RpcError) -> bool { error_str.contains("nonce too high") } -fn should_update_balance_threshold(error: &RpcError) -> bool { - let error_str = error.to_string().to_lowercase(); - - // "insufficient funds" should update the balance threshold - error_str.contains("insufficient funds") +fn should_update_balance_threshold(error: &EngineError) -> bool { + match error { + EngineError::RpcError { kind, .. } + | EngineError::PaymasterError { kind, .. } + | EngineError::BundlerError { kind, .. 
} => match kind { + RpcErrorKind::ErrorResp(resp) => { + let message = resp.message.to_lowercase(); + message.contains("insufficient funds") + || message.contains("insufficient balance") + || message.contains("out of gas") + || message.contains("insufficient eth") + || message.contains("balance too low") + || message.contains("not enough funds") + || message.contains("insufficient native token") + } + _ => false, + }, + _ => false, + } } fn is_retryable_rpc_error(kind: &RpcErrorKind) -> bool { @@ -454,6 +469,7 @@ where } // ========== CRASH RECOVERY ========== + #[tracing::instrument(skip_all)] async fn recover_borrowed_state( &self, scoped: &ScopedEoaExecutorStore<'_>, @@ -533,7 +549,7 @@ where .await?; tracing::warn!(transaction_id = %borrowed.transaction_id, nonce = nonce, error = %e, "Recycled failed transaction"); - if should_update_balance_threshold(&e) { + if should_update_balance_threshold(&e.to_engine_error(chain)) { self.update_balance_threshold(scoped, chain).await?; } @@ -558,6 +574,7 @@ where } // ========== CONFIRM FLOW ========== + #[tracing::instrument(skip_all)] async fn confirm_flow( &self, scoped: &ScopedEoaExecutorStore<'_>, @@ -726,6 +743,7 @@ where } // ========== SEND FLOW ========== + #[tracing::instrument(skip_all)] async fn send_flow( &self, scoped: &ScopedEoaExecutorStore<'_>, @@ -754,9 +772,9 @@ where scoped.update_health_data(&health).await?; } - if health.balance < health.balance_threshold { + if health.balance <= health.balance_threshold { tracing::warn!( - "EOA has insufficient balance (< {} wei), skipping send flow", + "EOA has insufficient balance (<= {} wei), skipping send flow", health.balance_threshold ); return Ok(0); @@ -839,6 +857,8 @@ where // 3. SEQUENTIAL REDIS: Move successfully built transactions to borrowed state let mut prepared_txs = Vec::new(); + let mut balance_threshold_update_needed = false; + for (nonce, transaction_id, build_result) in build_results { match build_result { Ok(signed_tx) => { @@ -881,12 +901,36 @@ where } } Err(e) => { + // Accumulate balance threshold issues instead of updating immediately + if let EoaExecutorWorkerError::TransactionSimulationFailed { + inner_error, .. + } = &e + { + if should_update_balance_threshold(inner_error) { + balance_threshold_update_needed = true; + } + } else if let EoaExecutorWorkerError::RpcError { inner_error, .. } = &e { + if should_update_balance_threshold(inner_error) { + balance_threshold_update_needed = true; + } + } + tracing::warn!("Failed to build transaction {}: {}", transaction_id, e); continue; } } } + // Update balance threshold once if any build failures were due to balance issues + if balance_threshold_update_needed { + if let Err(e) = self.update_balance_threshold(scoped, chain).await { + tracing::error!( + "Failed to update balance threshold after parallel build failures: {}", + e + ); + } + } + if prepared_txs.is_empty() { return Ok(0); } @@ -980,7 +1024,7 @@ where "Recycled transaction failed, re-recycled nonce" ); - if should_update_balance_threshold(&e) { + if should_update_balance_threshold(&e.to_engine_error(chain)) { if let Err(e) = self.update_balance_threshold(scoped, chain).await { @@ -1047,6 +1091,8 @@ where // 3. 
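// Simplified sketch of the balance-error classification above (illustrative only;
// the real check matches on EngineError / RpcErrorKind::ErrorResp as shown in this
// file's diff, while this version looks only at the raw JSON-RPC error message):
fn looks_like_balance_error(message: &str) -> bool {
    let m = message.to_lowercase();
    [
        "insufficient funds",
        "insufficient balance",
        "out of gas",
        "insufficient eth",
        "balance too low",
        "not enough funds",
        "insufficient native token",
    ]
    .iter()
    .any(|needle| m.contains(needle))
}
// looks_like_balance_error("insufficient funds for gas * price + value") == true
// looks_like_balance_error("nonce too low") == false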
SEQUENTIAL REDIS: Move successful transactions to borrowed state (maintain nonce order) let mut prepared_txs = Vec::new(); + let mut balance_threshold_update_needed = false; + for (i, result) in prepared_results.into_iter().enumerate() { match result { Ok(prepared) => { @@ -1091,6 +1137,20 @@ where } } Err(e) => { + // Accumulate balance threshold issues instead of updating immediately + if let EoaExecutorWorkerError::TransactionSimulationFailed { + inner_error, .. + } = &e + { + if should_update_balance_threshold(inner_error) { + balance_threshold_update_needed = true; + } + } else if let EoaExecutorWorkerError::RpcError { inner_error, .. } = &e { + if should_update_balance_threshold(inner_error) { + balance_threshold_update_needed = true; + } + } + tracing::warn!("Failed to build transaction {}: {}", pending_txs[i], e); // Individual transaction failure doesn't stop the worker continue; @@ -1098,6 +1158,16 @@ where } } + // Update balance threshold once if any build failures were due to balance issues + if balance_threshold_update_needed { + if let Err(e) = self.update_balance_threshold(scoped, chain).await { + tracing::error!( + "Failed to update balance threshold after parallel build failures: {}", + e + ); + } + } + if prepared_txs.is_empty() { return Ok(0); } @@ -1197,7 +1267,7 @@ where "New transaction failed, recycled nonce" ); - if should_update_balance_threshold(&e) { + if should_update_balance_threshold(&e.to_engine_error(chain)) { if let Err(e) = self.update_balance_threshold(scoped, chain).await { @@ -1370,11 +1440,52 @@ where // Get the latest attempt to extract gas values from // Build typed transaction -> manually bump -> sign - let typed_tx = self + let typed_tx = match self .build_typed_transaction(&tx_data, expected_nonce, chain) - .await?; + .await + { + Ok(tx) => tx, + Err(e) => { + // Check if this is a balance threshold issue during simulation + if let EoaExecutorWorkerError::TransactionSimulationFailed { + inner_error, .. + } = &e + { + if should_update_balance_threshold(inner_error) { + if let Err(e) = self.update_balance_threshold(scoped, chain).await { + tracing::error!("Failed to update balance threshold: {}", e); + } + } + } else if let EoaExecutorWorkerError::RpcError { inner_error, .. 
} = &e { + if should_update_balance_threshold(inner_error) { + if let Err(e) = self.update_balance_threshold(scoped, chain).await { + tracing::error!("Failed to update balance threshold: {}", e); + } + } + } + + tracing::warn!( + transaction_id = %transaction_id, + nonce = expected_nonce, + error = %e, + "Failed to build typed transaction for gas bump" + ); + return Ok(false); + } + }; let bumped_typed_tx = self.apply_gas_bump_to_typed_transaction(typed_tx, 120); // 20% increase - let bumped_tx = self.sign_transaction(bumped_typed_tx, &tx_data).await?; + let bumped_tx = match self.sign_transaction(bumped_typed_tx, &tx_data).await { + Ok(tx) => tx, + Err(e) => { + tracing::warn!( + transaction_id = %transaction_id, + nonce = expected_nonce, + error = %e, + "Failed to sign transaction for gas bump" + ); + return Ok(false); + } + }; // Record the gas bump attempt scoped @@ -1455,12 +1566,15 @@ where } } + #[tracing::instrument(skip_all, fields(eoa = %scoped.eoa(), chain_id = %chain.chain_id()))] async fn update_balance_threshold( &self, scoped: &ScopedEoaExecutorStore<'_>, chain: &impl Chain, ) -> Result<(), EoaExecutorWorkerError> { let mut health = self.get_eoa_health(scoped, chain).await?; + + tracing::info!("Updating balance threshold"); let balance_threshold = chain .provider() .get_balance(scoped.eoa()) @@ -1650,7 +1764,7 @@ where tx_request = if let Some(type_data) = &tx_data.user_request.transaction_type_data { // User provided gas settings - respect them first match type_data { - crate::eoa::store::EoaTransactionTypeData::Eip1559(data) => { + EoaTransactionTypeData::Eip1559(data) => { let mut req = tx_request; if let Some(max_fee) = data.max_fee_per_gas { req = req.with_max_fee_per_gas(max_fee); @@ -1666,7 +1780,7 @@ where req } - crate::eoa::store::EoaTransactionTypeData::Legacy(data) => { + EoaTransactionTypeData::Legacy(data) => { if let Some(gas_price) = data.gas_price { tx_request.with_gas_price(gas_price) } else { @@ -1674,7 +1788,7 @@ where self.estimate_gas_fees(chain, tx_request).await? 
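// Aside on the gas-bump factor used earlier in this file's diff: the worker calls
// apply_gas_bump_to_typed_transaction(typed_tx, 120) and the accompanying comment
// reads "20% increase", so the argument is read here as a percentage multiplier.
// A minimal sketch of that arithmetic (assumption: the real helper applies it to
// max_fee_per_gas / gas_price depending on transaction type, which is not shown here):
fn bump_by_percent(value: u128, percent: u128) -> u128 {
    value.saturating_mul(percent) / 100
}
// bump_by_percent(10_000_000_000, 120) == 12_000_000_000, i.e. a 20% increase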
} } - crate::eoa::store::EoaTransactionTypeData::Eip7702(data) => { + EoaTransactionTypeData::Eip7702(data) => { let mut req = tx_request; if let Some(authorization_list) = &data.authorization_list { req = req.with_authorization_list(authorization_list.clone()); diff --git a/server/configuration/server_base.yaml b/server/configuration/server_base.yaml index e236348..43f4f62 100644 --- a/server/configuration/server_base.yaml +++ b/server/configuration/server_base.yaml @@ -20,6 +20,7 @@ queue: webhook_workers: 100 external_bundler_send_workers: 100 userop_confirm_workers: 100 + eoa_executor_workers: 100 local_concurrency: 100 polling_interval_ms: 100 lease_duration_seconds: 600 diff --git a/server/src/config.rs b/server/src/config.rs index e9bd8ad..d248764 100644 --- a/server/src/config.rs +++ b/server/src/config.rs @@ -17,6 +17,7 @@ pub struct QueueConfig { pub external_bundler_send_workers: usize, pub userop_confirm_workers: usize, + pub eoa_executor_workers: usize, pub execution_namespace: Option, diff --git a/server/src/execution_router/mod.rs b/server/src/execution_router/mod.rs index b4bb9e2..909cde6 100644 --- a/server/src/execution_router/mod.rs +++ b/server/src/execution_router/mod.rs @@ -8,11 +8,12 @@ use engine_core::{ error::EngineError, execution_options::{ BaseExecutionOptions, QueuedTransaction, SendTransactionRequest, SpecificExecutionOptions, - WebhookOptions, aa::Erc4337ExecutionOptions, + WebhookOptions, aa::Erc4337ExecutionOptions, eoa::EoaExecutionOptions, }, transaction::InnerTransaction, }; use engine_executors::{ + eoa::{EoaExecutorStore, EoaExecutorWorker, EoaExecutorWorkerJobData, EoaTransactionRequest}, external_bundler::{ confirm::UserOpConfirmationHandler, send::{ExternalBundlerSendHandler, ExternalBundlerSendJobData}, @@ -34,6 +35,8 @@ pub struct ExecutionRouter { pub webhook_queue: Arc>, pub external_bundler_send_queue: Arc>>, pub userop_confirm_queue: Arc>>, + pub eoa_executor_queue: Arc>>, + pub eoa_executor_store: Arc, pub transaction_registry: Arc, pub vault_client: Arc, pub chains: Arc, @@ -222,6 +225,31 @@ impl ExecutionRouter { SpecificExecutionOptions::Auto(_auto_execution_options) => { todo!() } + + SpecificExecutionOptions::EOA(ref eoa_execution_options) => { + self.execute_eoa( + &execution_request.execution_options.base, + eoa_execution_options, + &execution_request.webhook_options, + &execution_request.params, + rpc_credentials, + signing_credential, + ) + .await?; + + let queued_transaction = QueuedTransaction { + id: execution_request + .execution_options + .base + .idempotency_key + .clone(), + batch_index: 0, + execution_params: execution_request.execution_options, + transaction_params: execution_request.params, + }; + + Ok(vec![queued_transaction]) + } } } @@ -273,4 +301,84 @@ impl ExecutionRouter { Ok(()) } + + async fn execute_eoa( + &self, + base_execution_options: &BaseExecutionOptions, + eoa_execution_options: &EoaExecutionOptions, + webhook_options: &Option>, + transactions: &[InnerTransaction], + rpc_credentials: RpcCredentials, + signing_credential: SigningCredential, + ) -> Result<(), TwmqError> { + if transactions.len() != 1 { + return Err(TwmqError::Runtime { + message: "EOA execution currently supports only single transactions".to_string(), + }); + } + + let transaction = &transactions[0]; + let eoa_transaction_request = EoaTransactionRequest { + transaction_id: base_execution_options.idempotency_key.clone(), + chain_id: base_execution_options.chain_id, + from: eoa_execution_options.from, + to: transaction.to, + value: 
transaction.value, + data: transaction.data.clone(), + gas_limit: eoa_execution_options.gas_limit, + webhook_options: webhook_options.clone(), + signing_credential, + rpc_credentials, + transaction_type_data: eoa_execution_options.transaction_type_data.clone(), + }; + + // Add transaction to the store + self.eoa_executor_store + .add_transaction(eoa_transaction_request) + .await + .map_err(|e| TwmqError::Runtime { + message: format!("Failed to add transaction to EOA store: {}", e), + })?; + + // Register transaction in registry + self.transaction_registry + .set_transaction_queue(&base_execution_options.idempotency_key, "eoa_executor") + .await + .map_err(|e| TwmqError::Runtime { + message: format!("Failed to register transaction: {}", e), + })?; + + // Ensure an idempotent job exists for this EOA:chain combination + let eoa_job_data = EoaExecutorWorkerJobData { + eoa_address: eoa_execution_options.from, + chain_id: base_execution_options.chain_id, + worker_id: format!( + "eoa_{}_{}", + eoa_execution_options.from, base_execution_options.chain_id + ), + }; + + // Create idempotent job for this EOA:chain - only one will exist + let job_id = format!( + "eoa_{}_{}", + eoa_execution_options.from, base_execution_options.chain_id + ); + + self.eoa_executor_queue + .clone() + .job(eoa_job_data) + .with_id(&job_id) + .push() + .await?; + + tracing::debug!( + transaction_id = %base_execution_options.idempotency_key, + eoa = %eoa_execution_options.from, + chain_id = %base_execution_options.chain_id, + queue = "eoa_executor", + "EOA transaction added to store and worker job ensured" + ); + + Ok(()) + } } diff --git a/server/src/main.rs b/server/src/main.rs index 592c280..4b95638 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -52,8 +52,14 @@ async fn main() -> anyhow::Result<()> { }); let eoa_signer = Arc::new(EoaSigner::new(vault_client.clone(), iaw_client)); - let queue_manager = - QueueManager::new(&config.redis, &config.queue, chains.clone(), signer.clone()).await?; + let queue_manager = QueueManager::new( + &config.redis, + &config.queue, + chains.clone(), + signer.clone(), + eoa_signer.clone(), + ) + .await?; tracing::info!("Queue manager initialized"); @@ -71,6 +77,8 @@ async fn main() -> anyhow::Result<()> { webhook_queue: queue_manager.webhook_queue.clone(), external_bundler_send_queue: queue_manager.external_bundler_send_queue.clone(), userop_confirm_queue: queue_manager.userop_confirm_queue.clone(), + eoa_executor_queue: queue_manager.eoa_executor_queue.clone(), + eoa_executor_store: queue_manager.eoa_executor_store.clone(), transaction_registry: queue_manager.transaction_registry.clone(), vault_client: Arc::new(vault_client.clone()), chains: chains.clone(), diff --git a/server/src/queue/manager.rs b/server/src/queue/manager.rs index ceabcf2..16dba88 100644 --- a/server/src/queue/manager.rs +++ b/server/src/queue/manager.rs @@ -4,6 +4,7 @@ use std::{sync::Arc, time::Duration}; use alloy::transports::http::reqwest; use engine_core::error::EngineError; use engine_executors::{ + eoa::{EoaExecutorStore, EoaExecutorWorker}, external_bundler::{ confirm::UserOpConfirmationHandler, deployment::{RedisDeploymentCache, RedisDeploymentLock}, @@ -23,6 +24,8 @@ pub struct QueueManager { pub webhook_queue: Arc>, pub external_bundler_send_queue: Arc>>, pub userop_confirm_queue: Arc>>, + pub eoa_executor_queue: Arc>>, + pub eoa_executor_store: Arc, pub transaction_registry: Arc, } @@ -36,6 +39,7 @@ fn get_queue_name_for_namespace(namespace: &Option, name: &str) -> Strin const 
EXTERNAL_BUNDLER_SEND_QUEUE_NAME: &str = "external_bundler_send"; const USEROP_CONFIRM_QUEUE_NAME: &str = "userop_confirm"; const WEBHOOK_QUEUE_NAME: &str = "webhook"; +const EOA_EXECUTOR_QUEUE_NAME: &str = "eoa_executor"; impl QueueManager { pub async fn new( @@ -43,6 +47,7 @@ impl QueueManager { queue_config: &QueueConfig, chain_service: Arc, userop_signer: Arc, + eoa_signer: Arc, ) -> Result { // Create Redis clients let redis_client = twmq::redis::Client::open(redis_config.url.as_str())?; @@ -53,6 +58,12 @@ impl QueueManager { queue_config.execution_namespace.clone(), )); + // Create EOA executor store + let eoa_executor_store = Arc::new(EoaExecutorStore::new( + redis_client.get_connection_manager().await?, + queue_config.execution_namespace.clone(), + )); + // Create deployment cache and lock let deployment_cache = RedisDeploymentCache::new(redis_client.clone()).await?; let deployment_lock = RedisDeploymentLock::new(redis_client.clone()).await?; @@ -62,6 +73,7 @@ impl QueueManager { local_concurrency: queue_config.local_concurrency, polling_interval: Duration::from_millis(queue_config.polling_interval_ms), lease_duration: Duration::from_secs(queue_config.lease_duration_seconds), + idempotency_mode: twmq::IdempotencyMode::Permanent, always_poll: false, max_success: 1000, max_failed: 1000, @@ -77,6 +89,10 @@ impl QueueManager { let mut webhook_queue_opts = base_queue_opts.clone(); webhook_queue_opts.local_concurrency = queue_config.webhook_workers; + let mut eoa_executor_queue_opts = base_queue_opts.clone(); + eoa_executor_queue_opts.idempotency_mode = twmq::IdempotencyMode::Active; + eoa_executor_queue_opts.local_concurrency = queue_config.eoa_executor_workers; + // Create webhook queue let webhook_handler = WebhookJobHandler { http_client: reqwest::Client::new(), @@ -96,6 +112,11 @@ impl QueueManager { USEROP_CONFIRM_QUEUE_NAME, ); + let eoa_executor_queue_name = get_queue_name_for_namespace( + &queue_config.execution_namespace, + EOA_EXECUTOR_QUEUE_NAME, + ); + let webhook_queue = Queue::builder() .name(webhook_queue_name) .options(webhook_queue_opts) @@ -142,10 +163,30 @@ impl QueueManager { .await? .arc(); + // Create EOA executor queue + let eoa_executor_handler = EoaExecutorWorker { + chain_service: chain_service.clone(), + store: eoa_executor_store.clone(), + eoa_signer, + max_inflight: 100, + max_recycled_nonces: 50, + }; + + let eoa_executor_queue = Queue::builder() + .name(eoa_executor_queue_name) + .options(eoa_executor_queue_opts) + .handler(eoa_executor_handler) + .redis_client(redis_client.clone()) + .build() + .await? 
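// Note on the wiring above (an inference from this patch, not stated in it): the EOA
// executor queue overrides the base options with IdempotencyMode::Active because its
// job id is reused — execute_eoa always pushes the same "eoa_{address}_{chain_id}" id
// for a given EOA/chain pair. Active mode removes completed ids from the dedupe set
// (see the srem calls added to twmq further below), so the next transaction for that
// pair can re-create the worker job, while duplicates of a still-pending/active job
// are still collapsed. A small sketch of the id scheme:
fn eoa_worker_job_id(eoa: alloy::primitives::Address, chain_id: u64) -> String {
    format!("eoa_{}_{}", eoa, chain_id)
}
// Pushing twice with the same id while the first job is still queued leaves a single
// pending job, as exercised by the idempotency tests later in this patch.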
+ .arc(); + Ok(Self { webhook_queue, external_bundler_send_queue, userop_confirm_queue, + eoa_executor_queue, + eoa_executor_store, transaction_registry, }) } @@ -166,16 +207,22 @@ impl QueueManager { tracing::info!("Starting external bundler confirmation worker"); let userop_confirm_worker = self.userop_confirm_queue.work(); + // Start EOA executor workers + tracing::info!("Starting EOA executor worker"); + let eoa_executor_worker = self.eoa_executor_queue.work(); + tracing::info!( - "Started {} webhook workers, {} send workers, {} confirm workers", + "Started {} webhook workers, {} send workers, {} confirm workers, {} eoa workers", queue_config.webhook_workers, queue_config.external_bundler_send_workers, - queue_config.userop_confirm_workers + queue_config.userop_confirm_workers, + queue_config.eoa_executor_workers ); ShutdownHandle::with_worker(webhook_worker) .and_worker(external_bundler_send_worker) .and_worker(userop_confirm_worker) + .and_worker(eoa_executor_worker) } /// Get queue statistics for monitoring @@ -221,10 +268,19 @@ impl QueueManager { failed: self.userop_confirm_queue.count(JobStatus::Failed).await?, }; + let eoa_executor_stats = QueueStatistics { + pending: self.eoa_executor_queue.count(JobStatus::Pending).await?, + active: self.eoa_executor_queue.count(JobStatus::Active).await?, + delayed: self.eoa_executor_queue.count(JobStatus::Delayed).await?, + success: self.eoa_executor_queue.count(JobStatus::Success).await?, + failed: self.eoa_executor_queue.count(JobStatus::Failed).await?, + }; + Ok(QueueStats { webhook: webhook_stats, external_bundler_send: send_stats, userop_confirm: confirm_stats, + eoa_executor: eoa_executor_stats, }) } } @@ -234,6 +290,7 @@ pub struct QueueStats { pub webhook: QueueStatistics, pub external_bundler_send: QueueStatistics, pub userop_confirm: QueueStatistics, + pub eoa_executor: QueueStatistics, } #[derive(Debug, serde::Serialize)] diff --git a/twmq/benches/throughput.rs b/twmq/benches/throughput.rs index f666634..4056fa0 100644 --- a/twmq/benches/throughput.rs +++ b/twmq/benches/throughput.rs @@ -10,7 +10,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::runtime::Runtime; use twmq::error::TwmqError; use twmq::job::JobError; -use twmq::{BorrowedJob, UserCancellable}; +use twmq::{BorrowedJob, IdempotencyMode, UserCancellable}; use twmq::{ DurableExecution, Queue, @@ -174,6 +174,7 @@ async fn load_test_throughput( // Optimize queue for high throughput let queue_options = QueueOptions { + idempotency_mode: IdempotencyMode::Active, local_concurrency: 200, // High concurrency polling_interval: Duration::from_millis(10), // Fast polling always_poll: true, // Always poll for max responsiveness diff --git a/twmq/src/lib.rs b/twmq/src/lib.rs index f9039c4..be6733c 100644 --- a/twmq/src/lib.rs +++ b/twmq/src/lib.rs @@ -24,6 +24,7 @@ use shutdown::WorkerHandle; use tokio::sync::Semaphore; use tokio::time::sleep; +pub use queue::IdempotencyMode; pub use redis; use tracing::Instrument; @@ -914,6 +915,11 @@ impl Queue { let result_json = serde_json::to_string(result)?; pipeline.hset(self.job_result_hash_name(), &job.job.id, result_json); + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == queue::IdempotencyMode::Active { + pipeline.srem(self.dedupe_set_name(), &job.job.id); + } + Ok(()) } @@ -1062,6 +1068,11 @@ impl Queue { let error_json = serde_json::to_string(&error_record)?; pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); + // For "active" idempotency 
mode, remove from deduplication set immediately + if self.options.idempotency_mode == queue::IdempotencyMode::Active { + pipeline.srem(self.dedupe_set_name(), &job.job.id); + } + Ok(()) } @@ -1249,6 +1260,11 @@ impl Queue { let error_json = serde_json::to_string(&error_record)?; hook_pipeline.lpush(self.job_errors_list_name(&job.id), error_json); + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == queue::IdempotencyMode::Active { + hook_pipeline.srem(self.dedupe_set_name(), &job.id); + } + // 2. Use pipeline in unlimited retry loop with lease check loop { let mut conn = self.redis.clone(); @@ -1290,4 +1306,20 @@ impl Queue { } } } + + pub async fn remove_from_dedupe_set(&self, job_id: &str) -> Result<(), TwmqError> { + self.redis + .clone() + .srem::<&str, &str, ()>(&self.dedupe_set_name(), job_id) + .await?; + Ok(()) + } + + pub async fn empty_dedupe_set(&self) -> Result<(), TwmqError> { + self.redis + .clone() + .del::<&str, ()>(&self.dedupe_set_name()) + .await?; + Ok(()) + } } diff --git a/twmq/src/multilane.rs b/twmq/src/multilane.rs index 497cb37..bb9b359 100644 --- a/twmq/src/multilane.rs +++ b/twmq/src/multilane.rs @@ -937,6 +937,11 @@ impl MultilaneQueue { let result_json = serde_json::to_string(result)?; pipeline.hset(self.job_result_hash_name(), &job.job.id, result_json); + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == crate::queue::IdempotencyMode::Active { + pipeline.srem(self.dedupe_set_name(), &job.job.id); + } + Ok(()) } @@ -1055,6 +1060,11 @@ impl MultilaneQueue { let error_json = serde_json::to_string(&error_record)?; pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == crate::queue::IdempotencyMode::Active { + pipeline.srem(self.dedupe_set_name(), &job.job.id); + } + Ok(()) } @@ -1286,6 +1296,11 @@ impl MultilaneQueue { let error_json = serde_json::to_string(&error_record)?; hook_pipeline.lpush(self.job_errors_list_name(&job.id), error_json); + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == crate::queue::IdempotencyMode::Active { + hook_pipeline.srem(self.dedupe_set_name(), &job.id); + } + // Execute with lease protection loop { let mut conn = self.redis.clone(); diff --git a/twmq/src/queue.rs b/twmq/src/queue.rs index 0418576..819a3d6 100644 --- a/twmq/src/queue.rs +++ b/twmq/src/queue.rs @@ -1,9 +1,27 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; use redis::{Client, aio::ConnectionManager}; +use serde::{Serialize, Deserialize}; use crate::{DurableExecution, Queue, error::TwmqError}; +/// Defines how job idempotency is handled +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub enum IdempotencyMode { + /// Jobs stay in the deduplication set until pruned (existing behavior) + /// This prevents duplicate jobs from being added even after completion + Permanent, + /// Jobs are removed from the deduplication set immediately upon completion + /// This only prevents duplicates of pending/delayed/active jobs + Active, +} + +impl Default for IdempotencyMode { + fn default() -> Self { + Self::Permanent + } +} + #[derive(Clone, Debug)] pub struct QueueOptions { pub max_success: usize, @@ -18,6 +36,9 @@ pub struct QueueOptions { /// If you have a horiztonally scaled deployment, this can be set to the default of false /// But if 
there's only one node, you can set this to true to avoid the local concurrency from blocking queue housekeeping pub always_poll: bool, + + /// Controls how job idempotency is handled + pub idempotency_mode: IdempotencyMode, } impl Default for QueueOptions { @@ -29,6 +50,7 @@ impl Default for QueueOptions { polling_interval: Duration::from_millis(100), lease_duration: Duration::from_secs(30), always_poll: false, + idempotency_mode: IdempotencyMode::default(), } } } diff --git a/twmq/tests/idempotency_modes.rs b/twmq/tests/idempotency_modes.rs new file mode 100644 index 0000000..663920b --- /dev/null +++ b/twmq/tests/idempotency_modes.rs @@ -0,0 +1,255 @@ +use serde::{Deserialize, Serialize}; +use std::sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, +}; +use std::time::Duration; +use twmq::{ + DurableExecution, Queue, + job::{BorrowedJob, JobResult, JobStatus}, + queue::{IdempotencyMode, QueueOptions}, + redis::aio::ConnectionManager, +}; + +const REDIS_URL: &str = "redis://127.0.0.1:6379/"; + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestJobData { + message: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestJobOutput { + processed: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestJobError { + error_message: String, +} + +impl From for TestJobError { + fn from(err: twmq::error::TwmqError) -> Self { + TestJobError { + error_message: err.to_string(), + } + } +} + +impl twmq::UserCancellable for TestJobError { + fn user_cancelled() -> Self { + TestJobError { + error_message: "User cancelled".to_string(), + } + } +} + +struct TestJobHandler { + processed_count: Arc, +} + +impl DurableExecution for TestJobHandler { + type Output = TestJobOutput; + type ErrorData = TestJobError; + type JobData = TestJobData; + + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + self.processed_count.fetch_add(1, Ordering::SeqCst); + + Ok(TestJobOutput { + processed: format!("Processed: {}", job.data().message), + }) + } +} + +// Helper to clean up Redis keys +async fn cleanup_redis_keys(conn_manager: &ConnectionManager, queue_name: &str) { + let mut conn = conn_manager.clone(); + let keys_pattern = format!("twmq:{}:*", queue_name); + + let keys: Vec = redis::cmd("KEYS") + .arg(&keys_pattern) + .query_async(&mut conn) + .await + .unwrap_or_default(); + if !keys.is_empty() { + redis::cmd("DEL") + .arg(keys) + .query_async::<()>(&mut conn) + .await + .unwrap_or_default(); + } +} + +#[tokio::test] +async fn test_permanent_idempotency_mode() { + let queue_name = format!("test_perm_{}", nanoid::nanoid!(6)); + let processed_count = Arc::new(AtomicUsize::new(0)); + + let mut queue_options = QueueOptions::default(); + queue_options.idempotency_mode = IdempotencyMode::Permanent; + queue_options.local_concurrency = 1; + + let handler = TestJobHandler { + processed_count: processed_count.clone(), + }; + + let queue = Arc::new( + Queue::new(REDIS_URL, &queue_name, Some(queue_options), handler) + .await + .expect("Failed to create queue"), + ); + + cleanup_redis_keys(&queue.redis, &queue_name).await; + + let job_data = TestJobData { + message: "test message".to_string(), + }; + + // Push the same job twice with the same ID + let job_id = "test_job_permanent"; + + let _job1 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + let _job2 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + + // Only one job should be in pending (deduplication should prevent the 
second) + let pending_count = queue.count(JobStatus::Pending).await.unwrap(); + assert_eq!( + pending_count, 1, + "Only one job should be pending due to deduplication" + ); + + // Start worker and let it process + let worker = queue.work(); + + // Wait for processing + tokio::time::sleep(Duration::from_millis(500)).await; + + // Should have processed exactly one job + assert_eq!( + processed_count.load(Ordering::SeqCst), + 1, + "Should have processed exactly one job" + ); + + // Try to add the same job again - should still be blocked by permanent idempotency + let _job3 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + + tokio::time::sleep(Duration::from_millis(500)).await; + + // Should still be only one processed job + assert_eq!( + processed_count.load(Ordering::SeqCst), + 1, + "Should still have processed only one job" + ); + + worker.shutdown().await.unwrap(); + cleanup_redis_keys(&queue.redis, &queue_name).await; +} + +#[tokio::test] +async fn test_active_idempotency_mode() { + let queue_name = format!("test_active_{}", nanoid::nanoid!(6)); + let processed_count = Arc::new(AtomicUsize::new(0)); + + let mut queue_options = QueueOptions::default(); + queue_options.idempotency_mode = IdempotencyMode::Active; + queue_options.local_concurrency = 1; + + let handler = TestJobHandler { + processed_count: processed_count.clone(), + }; + + let queue = Arc::new( + Queue::new(REDIS_URL, &queue_name, Some(queue_options), handler) + .await + .expect("Failed to create queue"), + ); + + cleanup_redis_keys(&queue.redis, &queue_name).await; + + let job_data = TestJobData { + message: "test message".to_string(), + }; + + // Push the same job twice with the same ID + let job_id = "test_job_active"; + + let _job1 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + let _job2 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + + // Only one job should be in pending (deduplication should prevent the second) + let pending_count = queue.count(JobStatus::Pending).await.unwrap(); + assert_eq!( + pending_count, 1, + "Only one job should be pending due to deduplication" + ); + + // Start worker and let it process + let worker = queue.work(); + + // Wait for processing + tokio::time::sleep(Duration::from_millis(500)).await; + + // Should have processed exactly one job + assert_eq!( + processed_count.load(Ordering::SeqCst), + 1, + "Should have processed exactly one job" + ); + + // Try to add the same job again - should be allowed with active idempotency + let _job3 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + + tokio::time::sleep(Duration::from_millis(500)).await; + + // Should have processed two jobs now + assert_eq!( + processed_count.load(Ordering::SeqCst), + 2, + "Should have processed two jobs with active idempotency" + ); + + worker.shutdown().await.unwrap(); + cleanup_redis_keys(&queue.redis, &queue_name).await; +} diff --git a/twmq/tests/lease_expiry.rs b/twmq/tests/lease_expiry.rs index b2ece8a..4c11378 100644 --- a/twmq/tests/lease_expiry.rs +++ b/twmq/tests/lease_expiry.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}; use twmq::{ - DurableExecution, FailHookData, NackHookData, Queue, SuccessHookData, + DurableExecution, FailHookData, IdempotencyMode, NackHookData, Queue, SuccessHookData, hooks::TransactionContext, 
job::{BorrowedJob, JobResult, JobStatus}, queue::QueueOptions, @@ -65,7 +65,10 @@ impl DurableExecution for SleepForeverHandler { type ErrorData = TestJobErrorData; type JobData = SleepForeverJobData; - async fn process(&self, job: &BorrowedJob) -> JobResult { + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { tracing::info!( "SLEEP_JOB: Starting to process job {}, attempt {}", job.job.id, @@ -153,6 +156,7 @@ async fn test_job_lease_expiry() { polling_interval: Duration::from_millis(100), local_concurrency: 1, always_poll: true, + idempotency_mode: IdempotencyMode::Active, }; let handler = SleepForeverHandler { @@ -301,6 +305,7 @@ async fn test_multiple_job_lease_expiry() { lease_duration, polling_interval: Duration::from_millis(100), always_poll: true, + idempotency_mode: IdempotencyMode::Active, }; let queue = Arc::new(