diff --git a/.github/workflows/ci-twmq.yaml b/.github/workflows/ci-twmq.yaml index 8aec294..cb8eab5 100644 --- a/.github/workflows/ci-twmq.yaml +++ b/.github/workflows/ci-twmq.yaml @@ -38,7 +38,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #checkout@v4 - name: Install CI dependencies - uses: taiki-e/install-action@ab3728c7ba6948b9b429627f4d55a68842b27f18 + uses: taiki-e/install-action@9185c192a96ba09167ad8663015b3fbbf007ec79 #@2.56.2 with: tool: cargo-nextest diff --git a/.github/workflows/coverage-twmq.yaml b/.github/workflows/coverage-twmq.yaml index 3f153fd..8c20654 100644 --- a/.github/workflows/coverage-twmq.yaml +++ b/.github/workflows/coverage-twmq.yaml @@ -38,7 +38,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #checkout@v4 - name: Install CI dependencies - uses: taiki-e/install-action@ab3728c7ba6948b9b429627f4d55a68842b27f18 + uses: taiki-e/install-action@9185c192a96ba09167ad8663015b3fbbf007ec79 #@2.56.2 with: tool: cargo-tarpaulin diff --git a/Cargo.lock b/Cargo.lock index 22d11cd..68ec643 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -47,7 +47,7 @@ dependencies = [ "bytes", "cfg-if", "http 1.3.1", - "indexmap 2.9.0", + "indexmap 2.10.0", "schemars 0.8.22", "serde", "serde_json", @@ -66,7 +66,7 @@ checksum = "be8e0d4af7cc08353807aaf80722125a229bf2d67be7fe0b89163c648db3d223" dependencies = [ "darling", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -77,41 +77,25 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "0.15.10" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a0be470ab41e3aaed6c54dbb2b6224d3048de467d8009cf9d5d32a8b8957ef7" -dependencies = [ - "alloy-consensus 0.15.8", - "alloy-core", - "alloy-eips 0.15.8", - "alloy-serde 0.15.8", - "alloy-signer 0.15.8", - "alloy-signer-aws 0.15.8", - "alloy-signer-gcp 0.15.8", - "alloy-signer-ledger 0.15.8", -] - -[[package]] -name = "alloy" -version = "1.0.9" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0093d23bf026b580c1f66ed3a053d8209c104a446c5264d3ad99587f6edef24e" +checksum = "4e0d1aecf3cab3d0e7383064ce488616434b4ade10d8904dff422e74203c712f" dependencies = [ - "alloy-consensus 1.0.8", + "alloy-consensus", "alloy-contract", "alloy-core", - "alloy-eips 1.0.8", + "alloy-eips", "alloy-genesis", - "alloy-json-rpc 1.0.8", - "alloy-network 1.0.8", + "alloy-json-rpc", + "alloy-network", "alloy-provider", "alloy-rpc-client", "alloy-rpc-types", - "alloy-serde 1.0.8", - "alloy-signer 1.0.8", - "alloy-signer-aws 1.0.9", - "alloy-signer-gcp 1.0.9", - "alloy-signer-ledger 1.0.9", + "alloy-serde", + "alloy-signer", + "alloy-signer-aws", + "alloy-signer-gcp", + "alloy-signer-ledger", "alloy-signer-local", "alloy-transport", "alloy-transport-http", @@ -119,9 +103,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.2.0" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7734aecfc58a597dde036e4c5cace2ae43e2f8bf3d406b022a1ef34da178dd49" +checksum = "5674914c2cfdb866c21cb0c09d82374ee39a1395cf512e7515f4c014083b3fff" dependencies = [ "alloy-primitives", "num_enum", @@ -130,39 +114,16 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c103f18381c4f17b5691cbea7baa3bafa7da3bf9c9b7d90f94f48715c6cc054" -dependencies = [ - "alloy-eips 0.15.8", - "alloy-primitives", - "alloy-rlp", - "alloy-serde 0.15.8", - "alloy-trie", - 
"auto_impl", - "c-kzg", - "derive_more", - "either", - "k256", - "once_cell", - "rand 0.8.5", - "secp256k1", - "serde", - "serde_with", - "thiserror 2.0.12", -] - -[[package]] -name = "alloy-consensus" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78090ff96d0d1b648dbcebc63b5305296b76ad4b5d4810f755d7d1224ced6247" +checksum = "e9c6ad411efe0f49e0e99b9c7d8749a1eb55f6dbf74a1bc6953ab285b02c4f67" dependencies = [ - "alloy-eips 1.0.8", + "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.8", + "alloy-serde", "alloy-trie", + "alloy-tx-macros", "auto_impl", "c-kzg", "derive_more", @@ -178,58 +139,45 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c13988a8d23dfd00a49dc6702bc704000d34853951f23b9c125a342ee537443" -dependencies = [ - "alloy-consensus 0.15.8", - "alloy-eips 0.15.8", - "alloy-primitives", - "alloy-rlp", - "alloy-serde 0.15.8", - "serde", -] - -[[package]] -name = "alloy-consensus-any" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdfc3b2f202e3c6284685e6d3dcfbb532b39552d9e1021276e68e2389037616" +checksum = "0bf397edad57b696501702d5887e4e14d7d0bbae9fbb6439e148d361f7254f45" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-eips 1.0.8", + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.8", + "alloy-serde", "serde", ] [[package]] name = "alloy-contract" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4a5b6c7829e8aa048f5b23defa21706b675e68e612cf88d9f509771fecc806" +checksum = "977b97d271159578afcb26e39e1ca5ce1a7f937697793d7d571b0166dd8b8225" dependencies = [ - "alloy-consensus 1.0.8", + "alloy-consensus", "alloy-dyn-abi", "alloy-json-abi", - "alloy-network 1.0.8", - "alloy-network-primitives 
1.0.8", + "alloy-network", + "alloy-network-primitives", "alloy-primitives", "alloy-provider", - "alloy-rpc-types-eth 1.0.8", + "alloy-rpc-types-eth", "alloy-sol-types", "alloy-transport", "futures", "futures-util", + "serde_json", "thiserror 2.0.12", ] [[package]] name = "alloy-core" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d020a85ae8cf79b9c897a86d617357817bbc9a7d159dd7e6fedf1bc90f64d35" +checksum = "ad31216895d27d307369daa1393f5850b50bbbd372478a9fa951c095c210627e" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -240,15 +188,14 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884a5d4560f7e5e34ec3c5e54a60223c56352677dd049b495fbb59384cf72a90" +checksum = "7b95b3deca680efc7e9cba781f1a1db352fa1ea50e6384a514944dcf4419e652" dependencies = [ "alloy-json-abi", "alloy-primitives", "alloy-sol-type-parser", "alloy-sol-types", - "const-hex", "derive_more", "itoa", "serde", @@ -294,36 +241,16 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.15.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86eba345a4e6ff5684cb98c0aedc69eca4db77cbca3a456e7930ab7d086febdd" +checksum = "749b8449e4daf7359bdf1dabdba6ce424ff8b1bdc23bdb795661b2e991a08d87" dependencies = [ "alloy-eip2124", "alloy-eip2930", "alloy-eip7702", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.15.8", - "auto_impl", - "c-kzg", - "derive_more", - "either", - "serde", - "sha2", -] - -[[package]] -name = "alloy-eips" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb7646210355c36b07886c91cac52e4727191e2b0ee1415cce8f953f6019dd2" -dependencies = [ - "alloy-eip2124", - "alloy-eip2930", - "alloy-eip7702", - "alloy-primitives", - "alloy-rlp", - "alloy-serde 1.0.8", + "alloy-serde", "auto_impl", "c-kzg", 
"derive_more", @@ -334,22 +261,23 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b14b506d7a4f739dd57ad5026d65eb64d842f4e971f71da5e9be5067ecbdc9" +checksum = "5fcbae2107f3f2df2b02bb7d9e81e8aa730ae371ca9dd7fd0c81c3d0cb78a452" dependencies = [ - "alloy-eips 1.0.8", + "alloy-eips", "alloy-primitives", - "alloy-serde 1.0.8", + "alloy-serde", "alloy-trie", "serde", + "serde_with", ] [[package]] name = "alloy-json-abi" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5189fa9a8797e92396bc4b4454c5f2073a4945f7c2b366af9af60f9536558f7a" +checksum = "15516116086325c157c18261d768a20677f0f699348000ed391d4ad0dcb82530" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -359,26 +287,13 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b3429d59feb5c72e1e5f92efd6f67def0f6a8de5cb610aea56c35eff1cf60d" -dependencies = [ - "alloy-primitives", - "alloy-sol-types", - "serde", - "serde_json", - "thiserror 2.0.12", - "tracing", -] - -[[package]] -name = "alloy-json-rpc" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a7ed339a633ba1a2af3eb9847dc90936d1b3c380a223cfca7a45be1713d8ab0" +checksum = "bc30b0e20fcd0843834ecad2a716661c7b9d5aca2486f8e96b93d5246eb83e06" dependencies = [ "alloy-primitives", "alloy-sol-types", + "http 1.3.1", "serde", "serde_json", "thiserror 2.0.12", @@ -387,46 +302,20 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.15.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb5acf3ae77e8e9775673d9f81f96c1f5596824c35b4f158c5f8eb30e4d565f" +checksum = "eaeb681024cf71f5ca14f3d812c0a8d8b49f13f7124713538e66d74d3bfe6aff" dependencies 
= [ - "alloy-consensus 0.15.8", - "alloy-consensus-any 0.15.8", - "alloy-eips 0.15.8", - "alloy-json-rpc 0.15.8", - "alloy-network-primitives 0.15.8", + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", "alloy-primitives", - "alloy-rpc-types-any 0.15.8", - "alloy-rpc-types-eth 0.15.8", - "alloy-serde 0.15.8", - "alloy-signer 0.15.8", - "alloy-sol-types", - "async-trait", - "auto_impl", - "derive_more", - "futures-utils-wasm", - "serde", - "serde_json", - "thiserror 2.0.12", -] - -[[package]] -name = "alloy-network" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "691a4825b3d08f031b49aae3c11cb35abf2af376fc11146bf8e5930a432dbf40" -dependencies = [ - "alloy-consensus 1.0.8", - "alloy-consensus-any 1.0.8", - "alloy-eips 1.0.8", - "alloy-json-rpc 1.0.8", - "alloy-network-primitives 1.0.8", - "alloy-primitives", - "alloy-rpc-types-any 1.0.8", - "alloy-rpc-types-eth 1.0.8", - "alloy-serde 1.0.8", - "alloy-signer 1.0.8", + "alloy-rpc-types-any", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", "alloy-sol-types", "async-trait", "auto_impl", @@ -439,35 +328,22 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.15.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d770bdcca12fe2b3e973de3f17290065b42c6d9a10cbd9728877bbbbfb050b6" +checksum = "a03ad273e1c55cc481889b4130e82860e33624e6969e9a08854e0f3ebe659295" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-eips 0.15.8", + "alloy-consensus", + "alloy-eips", "alloy-primitives", - "alloy-serde 0.15.8", - "serde", -] - -[[package]] -name = "alloy-network-primitives" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5713f40f9cbe4428292d095e8bbb38af82e63ad4247418b7f6d6fb7ef2d9d68b" -dependencies = [ - "alloy-consensus 1.0.8", - "alloy-eips 1.0.8", - "alloy-primitives", - "alloy-serde 
1.0.8", + "alloy-serde", "serde", ] [[package]] name = "alloy-primitives" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70b98b99c1dcfbe74d7f0b31433ff215e7d1555e367d90e62db904f3c9d4ff53" +checksum = "6177ed26655d4e84e00b65cb494d4e0b8830e7cae7ef5d63087d445a2600fb55" dependencies = [ "alloy-rlp", "bytes", @@ -475,8 +351,8 @@ dependencies = [ "const-hex", "derive_more", "foldhash", - "hashbrown 0.15.3", - "indexmap 2.9.0", + "hashbrown 0.15.4", + "indexmap 2.10.0", "itoa", "k256", "keccak-asm", @@ -492,20 +368,20 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1382ef9e0fa1ab3f5a3dbc0a0fa1193f3794d5c9d75fc22654bb6da1cf7a59cc" +checksum = "abc164acf8c41c756e76c7aea3be8f0fb03f8a3ef90a33e3ddcea5d1614d8779" dependencies = [ "alloy-chains", - "alloy-consensus 1.0.8", - "alloy-eips 1.0.8", - "alloy-json-rpc 1.0.8", - "alloy-network 1.0.8", - "alloy-network-primitives 1.0.8", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types-eth 1.0.8", - "alloy-signer 1.0.8", + "alloy-rpc-types-eth", + "alloy-signer", "alloy-sol-types", "alloy-transport", "alloy-transport-http", @@ -516,6 +392,7 @@ dependencies = [ "either", "futures", "futures-utils-wasm", + "http 1.3.1", "lru", "parking_lot", "pin-project 1.1.10", @@ -531,9 +408,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -542,22 +419,22 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.11" 
+version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "alloy-rpc-client" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859ec46fb132175969a0101bdd2fe9ecd413c40feeb0383e98710a4a089cee77" +checksum = "03c44d31bcb9afad460915fe1fba004a2af5a07a3376c307b9bdfeec3678c209" dependencies = [ - "alloy-json-rpc 1.0.8", + "alloy-json-rpc", "alloy-primitives", "alloy-transport", "alloy-transport-http", @@ -578,73 +455,42 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f1512ec542339a72c263570644a56d685f20ce77be465fbd3f3f33fb772bcbd" +checksum = "2ba2cf3d3c6ece87f1c6bb88324a997f28cf0ad7e98d5e0b6fa91c4003c30916" dependencies = [ "alloy-primitives", - "alloy-rpc-types-eth 1.0.8", - "alloy-serde 1.0.8", + "alloy-rpc-types-eth", + "alloy-serde", "serde", ] [[package]] name = "alloy-rpc-types-any" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d1a67b833618d34929f343c764cd84b88fb76af48fe3f4581ac6919e453822f" -dependencies = [ - "alloy-consensus-any 0.15.8", - "alloy-rpc-types-eth 0.15.8", - "alloy-serde 0.15.8", -] - -[[package]] -name = "alloy-rpc-types-any" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87236623aafabbf7196bcde37a4d626c3e56b3b22d787310e6d5ea25239c5d8" -dependencies = [ - "alloy-consensus-any 1.0.8", - "alloy-rpc-types-eth 1.0.8", - "alloy-serde 1.0.8", -] - -[[package]] -name = "alloy-rpc-types-eth" -version = "0.15.8" +version = "1.0.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08bc32276225c44bdc14b34ed244a1b4ccab0b4e39025bfc3dd60be5203af43" +checksum = "ef5b22062142ce3b2ed3374337d4b343437e5de6959397f55d2c9fe2c2ce0162" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-consensus-any 0.15.8", - "alloy-eips 0.15.8", - "alloy-network-primitives 0.15.8", - "alloy-primitives", - "alloy-rlp", - "alloy-serde 0.15.8", - "alloy-sol-types", - "itertools 0.13.0", - "serde", - "serde_json", - "thiserror 2.0.12", + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", ] [[package]] name = "alloy-rpc-types-eth" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b8acc64d23e484a0a27375b57caba34569729560a29aa366933f0ae07b7786f" +checksum = "391e59f81bacbffc7bddd2da3a26d6eec0e2058e9237c279e9b1052bdf21b49e" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-consensus-any 1.0.8", - "alloy-eips 1.0.8", - "alloy-network-primitives 1.0.8", + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-network-primitives", "alloy-primitives", "alloy-rlp", - "alloy-serde 1.0.8", + "alloy-serde", "alloy-sol-types", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_json", "thiserror 2.0.12", @@ -652,20 +498,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f448b0d9ee32ef23565ac4dfb0ea7544b03c01571ee187c471337a6f1b3cb203" -dependencies = [ - "alloy-primitives", - "serde", - "serde_json", -] - -[[package]] -name = "alloy-serde" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "114c287eb4595f1e0844800efb0860dd7228fcf9bc77d52e303fb7a43eb766b2" +checksum = "0ea08bc854235d4dff08fd57df8033285c11b8d7548b20c6da218194e7e6035f" dependencies = [ "alloy-primitives", "serde", @@ -674,26 +509,9 @@ dependencies = [ [[package]] name = 
"alloy-signer" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07abd80b2db2606a4d4566b8d71440534c46160f6bd34cf029be145ecc20fd46" -dependencies = [ - "alloy-dyn-abi", - "alloy-primitives", - "alloy-sol-types", - "async-trait", - "auto_impl", - "either", - "elliptic-curve", - "k256", - "thiserror 2.0.12", -] - -[[package]] -name = "alloy-signer" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afebd60fa84d9ce793326941509d8f26ce7b383f2aabd7a42ba215c1b92ea96b" +checksum = "bcb3759f85ef5f010a874d9ebd5ee6ce01cac65211510863124e0ebac6552db0" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -708,32 +526,14 @@ dependencies = [ [[package]] name = "alloy-signer-aws" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a05eb43a52d6e685eed6dfbc8ba35520b1f6b29bbb97039607deba168c413b" -dependencies = [ - "alloy-consensus 0.15.8", - "alloy-network 0.15.8", - "alloy-primitives", - "alloy-signer 0.15.8", - "async-trait", - "aws-sdk-kms", - "k256", - "spki", - "thiserror 2.0.12", - "tracing", -] - -[[package]] -name = "alloy-signer-aws" -version = "1.0.9" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6be3d371299b62eac5aa459fa58e8d1c761aabdc637573ae258ab744457fcc88" +checksum = "7942b850ec7be43de89b2680321d7921b7620b25be53b9981aae6fb29daa9e97" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-network 1.0.8", + "alloy-consensus", + "alloy-network", "alloy-primitives", - "alloy-signer 1.0.8", + "alloy-signer", "async-trait", "aws-sdk-kms", "k256", @@ -744,14 +544,14 @@ dependencies = [ [[package]] name = "alloy-signer-gcp" -version = "0.15.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977892f64653934d5674260f145b968bacbc644423f30cf54c117d184a2e5e46" +checksum = 
"74809e45053bd43d24338e618202ebea68d5660aa9632d77b0244faa2dcaa9d1" dependencies = [ - "alloy-consensus 0.15.8", - "alloy-network 0.15.8", + "alloy-consensus", + "alloy-network", "alloy-primitives", - "alloy-signer 0.15.8", + "alloy-signer", "async-trait", "gcloud-sdk", "k256", @@ -760,55 +560,17 @@ dependencies = [ "tracing", ] -[[package]] -name = "alloy-signer-gcp" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df298e47bbb7d0a8e06b603046b91062c11ba70d22f8a6c9bab1c1468bd856d0" -dependencies = [ - "alloy-consensus 1.0.8", - "alloy-network 1.0.8", - "alloy-primitives", - "alloy-signer 1.0.8", - "async-trait", - "gcloud-sdk", - "k256", - "spki", - "thiserror 2.0.12", - "tracing", -] - -[[package]] -name = "alloy-signer-ledger" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cd6d983cdd49649aff1d20010e8b70f3f04fb0b1be9d82ae6500c4afeb8fbd5" -dependencies = [ - "alloy-consensus 0.15.8", - "alloy-dyn-abi", - "alloy-network 0.15.8", - "alloy-primitives", - "alloy-signer 0.15.8", - "alloy-sol-types", - "async-trait", - "coins-ledger", - "futures-util", - "semver 1.0.26", - "thiserror 2.0.12", - "tracing", -] - [[package]] name = "alloy-signer-ledger" -version = "1.0.9" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0e049299cc7e131a438a904f89a493bcea45cd92bbed3e50116a28bc27987c" +checksum = "63c7e67367bc2b1d5790236448d2402865a4f0bc2b53cfda06d71b7ba3dbdffd" dependencies = [ - "alloy-consensus 1.0.8", + "alloy-consensus", "alloy-dyn-abi", - "alloy-network 1.0.8", + "alloy-network", "alloy-primitives", - "alloy-signer 1.0.8", + "alloy-signer", "alloy-sol-types", "async-trait", "coins-ledger", @@ -820,14 +582,14 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f551042c11c4fa7cb8194d488250b8dc58035241c418d79f07980c4aee4fa5c9" +checksum = "14d95902d29e1290809e1c967a1e974145b44b78f6e3e12fc07a60c1225e3df0" dependencies = [ - "alloy-consensus 1.0.8", - "alloy-network 1.0.8", + "alloy-consensus", + "alloy-network", "alloy-primitives", - "alloy-signer 1.0.8", + "alloy-signer", "async-trait", "k256", "rand 0.8.5", @@ -836,42 +598,42 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60fcfa26956bcb22f66ab13407115197f26ef23abca5b48d39a1946897382d74" +checksum = "a14f21d053aea4c6630687c2f4ad614bed4c81e14737a9b904798b24f30ea849" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "alloy-sol-macro-expander" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a9b402f0013f1ff8c24066eeafc2207a8e52810a2b18b77776ce7fead5af41" +checksum = "34d99282e7c9ef14eb62727981a985a01869e586d1dec729d3bb33679094c100" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.9.0", + "indexmap 2.10.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d02d61741337bb6b3f4899c2e3173fe17ffa2810e143d3b28acd953197c8dd79" +checksum = "eda029f955b78e493360ee1d7bd11e1ab9f2a220a5715449babc79d6d0a01105" dependencies = [ "alloy-json-abi", "const-hex", @@ -881,15 +643,15 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.101", + "syn 2.0.104", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "1.1.0" +version = "1.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "251273c5aa1abb590852f795c938730fa641832fc8fa77b5478ed1bf11b6097e" +checksum = "10db1bd7baa35bc8d4a1b07efbf734e73e5ba09f2580fb8cee3483a36087ceb2" dependencies = [ "serde", "winnow", @@ -897,24 +659,23 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02635bce18205ff8149fb752c753b0a91ea3f3c8ee04c58846448be4811a640" +checksum = "58377025a47d8b8426b3e4846a251f2c1991033b27f517aade368146f6ab1dfe" dependencies = [ "alloy-json-abi", "alloy-primitives", "alloy-sol-macro", - "const-hex", "serde", ] [[package]] name = "alloy-transport" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fb766c0bce9f62779a83048ca6d998c2ced4153d694027c66e537629f4fd61" +checksum = "dcdf4b7fc58ebb2605b2fc5a33dae5cf15527ea70476978351cc0db1c596ea93" dependencies = [ - "alloy-json-rpc 1.0.8", + "alloy-json-rpc", "alloy-primitives", "base64 0.22.1", "derive_more", @@ -933,11 +694,11 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.8" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "254bd59ca1abaf2da3e3201544a41924163b019414cce16f0dc6bc75d20c6612" +checksum = "4c4b0f3a9c28bcd3761504d9eb3578838d6d115c8959fc1ea05f59a3a8f691af" dependencies = [ - "alloy-json-rpc 1.0.8", + "alloy-json-rpc", "alloy-transport", "reqwest", "serde_json", @@ -948,9 +709,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" +checksum = "bada1fc392a33665de0dc50d401a3701b62583c655e3522a323490a5da016962" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -962,6 +723,19 @@ dependencies = [ "tracing", ] +[[package]] 
+name = "alloy-tx-macros" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79bf2869e66904b2148c809e7a75e23ca26f5d7b46663a149a1444fb98a69d1d" +dependencies = [ + "alloy-primitives", + "darling", + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -985,9 +759,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anyhow" @@ -1142,9 +916,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.23" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" +checksum = "40f6024f3f856663b45fd0c9b6f2024034a702f453549449e0d84a305900dad4" dependencies = [ "flate2", "futures-core", @@ -1183,7 +957,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1194,7 +968,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1211,14 +985,14 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" 
[[package]] name = "aws-credential-types" @@ -1234,9 +1008,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.5.7" +version = "1.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c4063282c69991e57faab9e5cb21ae557e59f5b0fb285c196335243df8dc25c" +checksum = "4f6c68419d8ba16d9a7463671593c54f81ba58cab466e9b759418da606dcc2e2" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -1258,9 +1032,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.66.0" +version = "1.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655097cd83ab1f15575890943135192560f77097413c6dd1733fdbdc453e81ac" +checksum = "6cd57d0c1a5bd6c7eaa2b26462e046d5ca7b72189346718d2435dfc48bfa988b" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1274,16 +1048,15 @@ dependencies = [ "bytes", "fastrand", "http 0.2.12", - "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sigv4" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3503af839bd8751d0bdc5a46b9cac93a003a353e635b0c12cf2376b5b53e41ea" +checksum = "ddfb9021f581b71870a17eac25b52335b82211cdc092e02b6876b2bcefa61666" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -1334,9 +1107,9 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.61.3" +version = "0.61.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92144e45819cae7dc62af23eac5a038a58aa544432d2102609654376a900bd07" +checksum = "a16e040799d29c17412943bdbf488fd75db04112d0c0d4b9290bacf5ae0014b9" dependencies = [ "aws-smithy-types", ] @@ -1375,9 +1148,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e5d9e3a80a18afa109391fb5ad09c3daf887b516c6fd805a157c6ea7994a57" +checksum = 
"bd8531b6d8882fd8f48f82a9754e682e29dd44cff27154af51fa3eb730f59efb" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1392,9 +1165,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40076bd09fadbc12d5e026ae080d0930defa606856186e31d83ccc6a255eeaf3" +checksum = "d498595448e43de7f4296b7b7a18a8a02c61ec9349128c80a368f7c3b4ab11a8" dependencies = [ "base64-simd", "bytes", @@ -1490,23 +1263,23 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "backon" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd0b50b1b78dbadd44ab18b3c794e496f3a139abb9fbc27d9c94c4eebbb96496" +checksum = "302eaff5357a264a2c42f127ecb8bac761cf99749fc3dc95677e2743991f99e7" dependencies = [ "fastrand", ] [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", "cfg-if", @@ -1547,9 +1320,9 @@ dependencies = [ [[package]] name = "base64ct" -version = "1.7.3" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bincode" @@ -1610,9 +1383,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" dependencies = [ "serde", ] @@ -1640,9 +1413,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c79a94619fade3c0b887670333513a67ac28a6a7e653eb260bf0d4103db38d" +checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" dependencies = [ "cc", "glob", @@ -1652,9 +1425,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byte-slice-cast" @@ -1710,18 +1483,18 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.21" +version = "1.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8691782945451c1c383942c4874dbe63814f61cb57ef773cda2972682b7bb3c0" +checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" dependencies = [ "shlex", ] [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -1808,18 +1581,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.38" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" dependencies = [ 
"clap_builder", ] [[package]] name = "clap_builder" -version = "4.5.38" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" dependencies = [ "anstyle", "clap_lex", @@ -1827,9 +1600,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "coins-ledger" @@ -1898,9 +1671,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.14.0" +version = "1.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" +checksum = "83e22e0ed40b96a48d3db274f72fd365bd78f67af39b6bbd47e8a15e1c6207ff" dependencies = [ "cfg-if", "cpufeatures", @@ -1976,9 +1749,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -2001,9 +1774,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" dependencies = [ "crc-catalog", ] @@ -2093,9 +1866,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" +version = 
"0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-bigint" @@ -2143,7 +1916,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2167,7 +1940,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2178,7 +1951,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2244,7 +2017,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "unicode-xid", ] @@ -2277,7 +2050,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2359,7 +2132,7 @@ dependencies = [ name = "engine-aa-core" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy", "engine-aa-types", "engine-core", "serde", @@ -2373,7 +2146,7 @@ dependencies = [ name = "engine-aa-types" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy", "schemars 0.8.22", "serde", "serde_json", @@ -2385,7 +2158,7 @@ dependencies = [ name = "engine-core" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy", "engine-aa-types", "schemars 0.8.22", "serde", @@ -2406,11 +2179,12 @@ dependencies = [ name = "engine-executors" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy", "chrono", "engine-aa-core", "engine-aa-types", "engine-core", + "futures", "hex", "hmac", "rand 0.9.1", @@ -2419,6 +2193,7 @@ dependencies = [ "serde_json", "sha2", "thiserror 2.0.12", + "tokio", "tracing", "twmq", 
"uuid", @@ -2432,12 +2207,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -2519,9 +2294,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -2625,7 +2400,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2666,9 +2441,9 @@ checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" [[package]] name = "gcloud-sdk" -version = "0.27.0" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00c7dc8c1f6c0865d02a2d931f3a15ac919ef583077c5141fd9b8efa8b493c44" +checksum = "a3ec9c312db09dc0dac684dda2f18d76e9ce00effdd27fcaaa90fa811691cd6d" dependencies = [ "async-trait", "bytes", @@ -2726,15 +2501,15 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "js-sys", @@ -2769,9 +2544,9 
@@ dependencies = [ [[package]] name = "h2" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" dependencies = [ "atomic-waker", "bytes", @@ -2779,7 +2554,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.9.0", + "indexmap 2.10.0", "slab", "tokio", "tokio-util", @@ -2810,9 +2585,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ "allocator-api2", "equivalent", @@ -2826,7 +2601,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -2837,9 +2612,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.9" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -2980,11 +2755,10 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", 
"http 1.3.1", "hyper", "hyper-util", @@ -2994,7 +2768,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots 0.26.10", + "webpki-roots", ] [[package]] @@ -3028,9 +2802,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c293b6b3d21eca78250dc7dbebd6b9210ec5530e038cbfe0661b5c47ab06e8" +checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" dependencies = [ "base64 0.22.1", "bytes", @@ -3078,21 +2852,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -3101,31 +2876,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" 
+version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -3133,67 +2888,54 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", + "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", 
"zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -3213,9 +2955,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -3238,7 +2980,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3254,12 +2996,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "serde", ] @@ -3272,6 +3014,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "io-uring" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + [[package]] name = "ipnet" version = "2.11.0" @@ -3306,6 +3059,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] 
name = "itoa" version = "1.0.15" @@ -3389,15 +3151,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libm" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libusb1-sys" @@ -3419,15 +3181,15 @@ checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -3458,9 +3220,15 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "macro-string" version = "0.1.4" @@ -3469,7 +3237,7 @@ 
checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3489,9 +3257,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memoffset" @@ -3520,22 +3288,22 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", ] [[package]] @@ -3646,9 +3414,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ "hermit-abi", "libc", @@ -3656,33 +3424,34 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", + "rustversion", ] [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "nybbles" -version = "0.3.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +checksum = "11d51b0175c49668a033fe7cc69080110d9833b291566cdf332905f3ad9c68a0" dependencies = [ "alloy-rlp", - "const-hex", "proptest", + "ruint", "serde", "smallvec", ] @@ -3716,11 +3485,11 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.72" +version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cfg-if", "foreign-types", "libc", @@ -3737,7 +3506,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3748,9 +3517,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.108" +version = "0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +checksum = 
"90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -3782,9 +3551,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parity-scale-codec" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" dependencies = [ "arrayvec", "bitvec", @@ -3798,14 +3567,14 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3816,9 +3585,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -3826,9 +3595,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -3876,9 +3645,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.8.0" +version = "2.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" dependencies = [ "memchr", "thiserror 2.0.12", @@ -3887,9 +3656,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d725d9cfd79e87dccc9341a2ef39d1b6f6353d68c4b33c177febbe1a402c97c5" +checksum = "bb056d9e8ea77922845ec74a1c4e8fb17e7c218cc4fc11a15c5d25e189aa40bc" dependencies = [ "pest", "pest_generator", @@ -3897,24 +3666,23 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7d01726be8ab66ab32f9df467ae8b1148906685bbe75c82d1e65d7f5b3f841" +checksum = "87e404e638f781eb3202dc82db6760c8ae8a1eeef7fb3fa8264b2ef280504966" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "pest_meta" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9f832470494906d1fca5329f8ab5791cc60beb230c74815dff541cbd2b5ca0" +checksum = "edd1101f170f5903fde0914f899bb503d9ff5271d7ba76bbb70bea63690cc0d5" dependencies = [ - "once_cell", "pest", "sha2", ] @@ -3956,7 +3724,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4028,9 +3796,18 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "potential_utf" +version = 
"0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] [[package]] name = "powerfmt" @@ -4086,7 +3863,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4100,17 +3877,17 @@ dependencies = [ [[package]] name = "proptest" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.9.0", + "bitflags 2.9.1", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand 0.9.1", + "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -4135,10 +3912,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4158,9 +3935,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" -version = "0.11.7" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" dependencies = [ "bytes", "cfg_aliases", @@ -4178,12 +3955,13 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcbafbbdbb0f638fe3f35f3c56739f77a8a1d070cb25603226c83339b391472b" +checksum = 
"49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" dependencies = [ "bytes", - "getrandom 0.3.2", + "getrandom 0.3.3", + "lru-slab", "rand 0.9.1", "ring", "rustc-hash", @@ -4198,9 +3976,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" dependencies = [ "cfg_aliases", "libc", @@ -4221,9 +3999,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "radium" @@ -4289,17 +4067,17 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "serde", ] [[package]] name = "rand_xorshift" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.6.4", + "rand_core 0.9.3", ] [[package]] @@ -4349,11 +4127,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -4373,7 +4151,7 @@ checksum = 
"1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4428,9 +4206,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.18" +version = "0.12.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff6b0dbbe4d5a37318f433d4fc82babd21631f194d370409ceb2e40b2f0b5" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" dependencies = [ "async-compression", "base64 0.22.1", @@ -4446,13 +4224,11 @@ dependencies = [ "hyper-rustls", "hyper-tls", "hyper-util", - "ipnet", "js-sys", "log", "mime", "mime_guess", "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", "quinn", @@ -4475,7 +4251,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.0", + "webpki-roots", ] [[package]] @@ -4519,16 +4295,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.7", - "bitflags 2.9.0", + "bitflags 2.9.1", "serde", "serde_derive", ] [[package]] name = "ruint" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a46eb779843b2c4f21fac5773e25d6d5b7c8f0922876c91541790d2ca27eef" +checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", @@ -4569,20 +4345,19 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e310ef0e1b6eeb79169a1171daf9abcb87a2e17c03bee2c4bb100b55c75409f" +checksum = "e7295b7ce3bf4806b419dc3420745998b447178b7005e2011947b38fc5aa6791" dependencies = [ "cfg-if", "ordered-multimap", - "trim-in-place", ] [[package]] name = 
"rustc-demangle" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" [[package]] name = "rustc-hash" @@ -4620,7 +4395,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys", @@ -4629,9 +4404,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.26" +version = "0.23.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" +checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "log", "once_cell", @@ -4656,18 +4431,19 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ "web-time", + "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.103.1" +version = "0.103.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" dependencies = [ "ring", "rustls-pki-types", @@ -4676,9 +4452,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = 
"8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "rusty-fork" @@ -4723,7 +4499,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" dependencies = [ "dyn-clone", - "indexmap 2.9.0", + "indexmap 2.10.0", "schemars_derive", "serde", "serde_json", @@ -4741,6 +4517,18 @@ dependencies = [ "serde_json", ] +[[package]] +name = "schemars" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "schemars_derive" version = "0.8.22" @@ -4750,7 +4538,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4820,7 +4608,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -4833,8 +4621,8 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.9.0", - "core-foundation 0.10.0", + "bitflags 2.9.1", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -4900,7 +4688,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4911,7 +4699,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4951,9 +4739,9 @@ dependencies = [ [[package]] name = 
"serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -4972,16 +4760,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.9.0", + "indexmap 2.10.0", "schemars 0.9.0", + "schemars 1.0.3", "serde", "serde_derive", "serde_json", @@ -4991,14 +4780,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5096,27 +4885,24 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "serde", ] [[package]] name = "socket2" -version = 
"0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -5169,7 +4955,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5191,9 +4977,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -5202,14 +4988,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0f0d4760f4c2a0823063b2c70e97aa2ad185f57be195172ccc0e23c4b787c4" +checksum = "b9ac494e7266fcdd2ad80bf4375d55d27a117ea5c866c26d0e97fe5b3caeeb75" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5229,7 +5015,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5238,7 +5024,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -5267,12 +5053,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.19.1" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", - "getrandom 0.3.2", + "getrandom 0.3.3", "once_cell", "rustix", "windows-sys 0.59.0", @@ -5282,7 +5068,7 @@ dependencies = [ name = "thirdweb-core" version = "0.1.0" dependencies = [ - "alloy 1.0.9", + "alloy", "engine-aa-types", "moka", "reqwest", @@ -5300,7 +5086,7 @@ name = "thirdweb-engine" version = "0.1.0" dependencies = [ "aide", - "alloy 1.0.9", + "alloy", "anyhow", "axum", "config", @@ -5352,7 +5138,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5363,17 +5149,16 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -5427,9 +5212,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -5462,17 +5247,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = 
"1140bb80481756a8cbe10541f37433b459c5aa1e727b4c020fbfebdc25bf3ec4" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", + "slab", "socket2", "tokio-macros", "windows-sys 0.52.0", @@ -5486,7 +5273,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5536,9 +5323,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", @@ -5548,20 +5335,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "serde", "serde_spanned", "toml_datetime", @@ -5570,9 +5357,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85839f0b32fd242bb3209262371d07feda6d780d16ee9d2bc88581b89da1549b" +checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" dependencies = [ "async-trait", "axum", @@ -5607,7 +5394,7 @@ checksum = 
"d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "pin-project-lite", "slab", "sync_wrapper", @@ -5620,11 +5407,11 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "bytes", "futures-util", "http 1.3.1", @@ -5675,20 +5462,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -5748,12 +5535,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trim-in-place" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc" - [[package]] name = "try-lock" version = "0.2.5" @@ -5865,12 +5646,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" 
version = "1.0.4" @@ -5883,7 +5658,7 @@ version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fcc29c80c21c31608227e0912b2d7fddba57ad76b606890627ba8ee7964e993" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "serde", "serde_json", "utoipa-gen", @@ -5911,7 +5686,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.101", + "syn 2.0.104", "uuid", ] @@ -5933,7 +5708,7 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "js-sys", "serde", "wasm-bindgen", @@ -5948,9 +5723,9 @@ checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vault-sdk" version = "0.1.0" -source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main#a9d62a85ae69d47b2f341e886d16c12611644235" +source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy#b0a72f93335ff05f722c070f32f0697c5478243a" dependencies = [ - "alloy 0.15.10", + "alloy", "chacha20poly1305", "chrono", "hex", @@ -5969,9 +5744,9 @@ dependencies = [ [[package]] name = "vault-types" version = "0.1.0" -source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=main#a9d62a85ae69d47b2f341e886d16c12611644235" +source = "git+ssh://git@github.com/thirdweb-dev/vault.git?branch=pb%2Fupdate-alloy#b0a72f93335ff05f722c070f32f0697c5478243a" dependencies = [ - "alloy 0.15.10", + "alloy", "bincode", "chrono", "serde", @@ -6033,9 +5808,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -6068,7 +5843,7 @@ 
dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-shared", ] @@ -6103,7 +5878,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6132,9 +5907,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +checksum = "d8d49b5d6c64e8558d9b1b065014426f35c18de636895d24893dbbd329743446" dependencies = [ "futures", "js-sys", @@ -6166,18 +5941,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37493cadf42a2a939ed404698ded7fb378bf301b5011f973361779a3a74f8c93" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "webpki-roots" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" +checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" dependencies = [ "rustls-pki-types", ] @@ -6215,9 +5981,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.61.1" +version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", "windows-core", @@ -6237,25 +6003,26 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.61.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.0", + "windows-strings", ] [[package]] name = "windows-future" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1d6bbefcb7b60acd19828e1bc965da6fcf18a7e39490c5f8be71e54a19ba32" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" dependencies = [ "windows-core", "windows-link", + "windows-threading", ] [[package]] @@ -6266,7 +6033,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6277,14 +6044,14 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-numerics" @@ -6298,38 +6065,29 @@ dependencies = [ [[package]] name = "windows-registry" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" dependencies = [ + "windows-link", "windows-result", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-strings", ] [[package]] name = "windows-result" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-strings" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] @@ -6352,6 +6110,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -6370,9 +6137,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" dependencies = [ "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", @@ -6384,6 +6151,15 @@ dependencies = [ "windows_x86_64_msvc 0.53.0", ] +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -6482,9 +6258,9 @@ checksum = 
"271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.9" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9fb597c990f03753e08d3c29efbfcf2019a003b4bf4ba19225c158e1549f0f3" +checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" dependencies = [ "memchr", ] @@ -6495,20 +6271,14 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "wyz" @@ -6533,9 +6303,9 @@ dependencies = [ [[package]] name = "yaml-rust2" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818913695e83ece1f8d2a1c52d54484b7b46d0f9c06beeb2649b9da50d9b512d" +checksum = "4ce2a4ff45552406d02501cea6c18d8a7e50228e7736a872951fe2fe75c91be7" dependencies = [ "arraydeque", "encoding_rs", @@ -6544,9 +6314,9 @@ dependencies = [ [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", @@ -6556,34 +6326,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" 
+version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6603,7 +6373,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] @@ -6624,14 +6394,25 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", ] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ 
"yoke", "zerofrom", @@ -6640,11 +6421,11 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] diff --git a/Cargo.toml b/Cargo.toml index 32fe580..ef7b824 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,16 @@ [workspace] -members = ["aa-types", "aa-core", "core", "executors", "server", "thirdweb-core", "twmq"] +members = [ + "aa-types", + "aa-core", + "core", + "executors", + "server", + "thirdweb-core", + "twmq", +] resolver = "2" + +[workspace.dependencies] +alloy = { version = "1.0.8" } +vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } +vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "pb/update-alloy" } diff --git a/aa-core/Cargo.toml b/aa-core/Cargo.toml index 8644cc7..ae93cb9 100644 --- a/aa-core/Cargo.toml +++ b/aa-core/Cargo.toml @@ -4,11 +4,11 @@ version = "0.1.0" edition = "2024" [dependencies] -alloy = { version = "1.0.8", features = ["serde"] } +alloy = { workspace = true, features = ["serde"] } tokio = "1.44.2" engine-aa-types = { path = "../aa-types" } engine-core = { path = "../core" } -vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } -vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } +vault-types = { workspace = true } +vault-sdk = { workspace = true } serde = "1.0.219" tracing = "0.1.41" diff --git a/aa-core/src/userop/builder.rs b/aa-core/src/userop/builder.rs index 355f883..65807a9 100644 --- a/aa-core/src/userop/builder.rs +++ b/aa-core/src/userop/builder.rs @@ -3,7 +3,6 @@ use 
std::sync::Arc; use alloy::{ hex, primitives::{Address, Bytes, U256}, - providers::Provider, rpc::types::{PackedUserOperation, UserOperation}, }; use engine_aa_types::VersionedUserOp; @@ -236,7 +235,7 @@ impl<'a, C: Chain> UserOpBuilderV0_7<'a, C> { // .estimate_eip1559_fees() // .await // .map_err(|err| err.to_engine_error(self.chain))?; - + // TODO: modularize this so only used with thirdweb paymaster let prices = self .chain diff --git a/core/Cargo.toml b/core/Cargo.toml index 9945f42..5427590 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -10,8 +10,8 @@ schemars = "0.8.22" serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" thiserror = "2.0.12" -vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } -vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } +vault-types = { workspace = true } +vault-sdk = { workspace = true } tower = "0.5.2" tracing = "0.1.41" twmq = { version = "0.1.0", path = "../twmq" } diff --git a/core/src/chain.rs b/core/src/chain.rs index a38bb10..bba79f6 100644 --- a/core/src/chain.rs +++ b/core/src/chain.rs @@ -126,48 +126,73 @@ impl Chain for ThirdwebChain { impl ThirdwebChainConfig<'_> { pub fn to_chain(&self) -> Result { - let rpc_url = Url::parse(&format!( - "https://{chain_id}.{base_url}/{client_id}", - chain_id = self.chain_id, - base_url = self.rpc_base_url, - client_id = self.client_id, - )) - .map_err(|e| EngineError::RpcConfigError { - message: format!("Failed to parse RPC URL: {}", e), - })?; - - let bundler_url = Url::parse(&format!( - "https://{chain_id}.{base_url}/v2", - chain_id = self.chain_id, - base_url = self.bundler_base_url, - )) - .map_err(|e| EngineError::RpcConfigError { - message: format!("Failed to parse Bundler URL: {}", e), - })?; - - let paymaster_url = Url::parse(&format!( - "https://{chain_id}.{base_url}/v2", - chain_id = self.chain_id, - base_url = self.paymaster_base_url, - 
)) - .map_err(|e| EngineError::RpcConfigError { - message: format!("Failed to parse Paymaster URL: {}", e), - })?; + // Special handling for chain ID 31337 (local anvil) + let (rpc_url, bundler_url, paymaster_url) = if self.chain_id == 31337 { + // For local anvil, use localhost URLs + let local_rpc_url = "http://127.0.0.1:8545"; + let rpc_url = Url::parse(local_rpc_url).map_err(|e| EngineError::RpcConfigError { + message: format!("Failed to parse local anvil RPC URL: {}", e), + })?; + + // For bundler and paymaster, use the same local RPC URL + // since anvil doesn't have separate bundler/paymaster services + let bundler_url = rpc_url.clone(); + let paymaster_url = rpc_url.clone(); + + (rpc_url, bundler_url, paymaster_url) + } else { + // Standard URL construction for other chains + let rpc_url = Url::parse(&format!( + "https://{chain_id}.{base_url}/{client_id}", + chain_id = self.chain_id, + base_url = self.rpc_base_url, + client_id = self.client_id, + )) + .map_err(|e| EngineError::RpcConfigError { + message: format!("Failed to parse RPC URL: {}", e), + })?; + + let bundler_url = Url::parse(&format!( + "https://{chain_id}.{base_url}/v2", + chain_id = self.chain_id, + base_url = self.bundler_base_url, + )) + .map_err(|e| EngineError::RpcConfigError { + message: format!("Failed to parse Bundler URL: {}", e), + })?; + + let paymaster_url = Url::parse(&format!( + "https://{chain_id}.{base_url}/v2", + chain_id = self.chain_id, + base_url = self.paymaster_base_url, + )) + .map_err(|e| EngineError::RpcConfigError { + message: format!("Failed to parse Paymaster URL: {}", e), + })?; + + (rpc_url, bundler_url, paymaster_url) + }; let mut sensitive_headers = HeaderMap::new(); - sensitive_headers.insert( - "x-client-id", - HeaderValue::from_str(self.client_id).map_err(|e| EngineError::RpcConfigError { - message: format!("Unserialisable client-id used: {e}"), - })?, - ); - - sensitive_headers.insert( - "x-secret-key", - HeaderValue::from_str(self.secret_key).map_err(|e| 
EngineError::RpcConfigError { - message: format!("Unserialisable secret-key used: {e}"), - })?, - ); + + // Only add auth headers for non-local chains + if self.chain_id != 31337 { + sensitive_headers.insert( + "x-client-id", + HeaderValue::from_str(self.client_id).map_err(|e| EngineError::RpcConfigError { + message: format!("Unserialisable client-id used: {e}"), + })?, + ); + + sensitive_headers.insert( + "x-secret-key", + HeaderValue::from_str(self.secret_key).map_err(|e| { + EngineError::RpcConfigError { + message: format!("Unserialisable secret-key used: {e}"), + } + })?, + ); + } let reqwest_client = HttpClientBuilder::new() @@ -181,10 +206,19 @@ impl ThirdwebChainConfig<'_> { let paymaster_transport = transport_builder.default_transport(paymaster_url.clone()); let bundler_transport = transport_builder.default_transport(bundler_url.clone()); - let sensitive_bundler_transport = - transport_builder.with_headers(bundler_url.clone(), sensitive_headers.clone()); - let sensitive_paymaster_transport = - transport_builder.with_headers(paymaster_url.clone(), sensitive_headers); + let sensitive_bundler_transport = if self.chain_id == 31337 { + // For local anvil, use the same transport as non-sensitive + transport_builder.default_transport(bundler_url.clone()) + } else { + transport_builder.with_headers(bundler_url.clone(), sensitive_headers.clone()) + }; + + let sensitive_paymaster_transport = if self.chain_id == 31337 { + // For local anvil, use the same transport as non-sensitive + transport_builder.default_transport(paymaster_url.clone()) + } else { + transport_builder.with_headers(paymaster_url.clone(), sensitive_headers) + }; let paymaster_rpc_client = RpcClient::builder().transport(paymaster_transport, false); let bundler_rpc_client = RpcClient::builder().transport(bundler_transport, false); diff --git a/core/src/error.rs b/core/src/error.rs index 572f60c..d3ef448 100644 --- a/core/src/error.rs +++ b/core/src/error.rs @@ -56,8 +56,8 @@ pub enum RpcErrorKind { 
#[error("HTTP error {status}")] TransportHttpError { status: u16, body: String }, - #[error("Other transport error: {0}")] - OtherTransportError(String), + #[error("Other transport error: {message}")] + OtherTransportError { message: String }, } #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema, utoipa::ToSchema)] @@ -345,8 +345,12 @@ fn to_engine_rpc_error_kind(err: &AlloyRpcError) -> RpcError status: err.status, body: err.body.to_string(), }, - TransportErrorKind::Custom(err) => RpcErrorKind::OtherTransportError(err.to_string()), - _ => RpcErrorKind::OtherTransportError(err.to_string()), + TransportErrorKind::Custom(err) => RpcErrorKind::OtherTransportError { + message: err.to_string(), + }, + _ => RpcErrorKind::OtherTransportError { + message: err.to_string(), + }, }, } } diff --git a/core/src/execution_options/eoa.rs b/core/src/execution_options/eoa.rs new file mode 100644 index 0000000..3699ccc --- /dev/null +++ b/core/src/execution_options/eoa.rs @@ -0,0 +1,186 @@ +use crate::defs::AddressDef; +use alloy::eips::eip7702::SignedAuthorization; +use alloy::primitives::{Address, U256}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// ### EOA Execution Options +/// This struct configures EOA (Externally Owned Account) direct execution. +/// +/// EOA execution sends transactions directly from an EOA address without +/// smart contract abstraction. This is the most basic form of transaction +/// execution and is suitable for simple transfers and contract interactions. 
+/// +/// ### Use Cases +/// - Direct ETH transfers +/// - Simple contract interactions +/// - Gas-efficient transactions +/// - When smart account features are not needed +/// +/// ### Features +/// - Direct transaction execution from EOA +/// - Automatic nonce management +/// - Gas price optimization +/// - Transaction confirmation tracking +/// - Retry and recovery mechanisms +/// - Support for EIP-1559, EIP-2930, and Legacy transactions +/// - Support for EIP-7702 delegated transactions +#[derive(Deserialize, Serialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaExecutionOptions { + /// The EOA address to send transactions from + /// This account must have sufficient balance to pay for gas and transaction value + #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] + pub from: Address, + + /// The gas limit to use for the transaction + /// If not provided, the system will auto-detect the best gas limit + #[schemars(with = "Option")] + #[schema(value_type = Option)] + pub gas_limit: Option, + + // /// Maximum number of in-flight transactions for this EOA + // /// Controls how many transactions can be pending confirmation at once + // /// Defaults to 100 if not specified + // #[serde(default = "default_max_inflight")] + // pub max_inflight: u64, + + // /// Maximum number of recycled nonces to keep + // /// When transactions fail, their nonces are recycled for reuse + // /// Defaults to 50 if not specified + // #[serde(default = "default_max_recycled_nonces")] + // pub max_recycled_nonces: u64, + /// Transaction type-specific data for gas configuration + /// If not provided, the system will auto-detect the best transaction type + #[serde(flatten)] + pub transaction_type_data: Option, +} + +/// EOA Transaction type-specific data for different EIP standards +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(untagged)] +pub enum EoaTransactionTypeData { + /// 
EIP-7702 transaction with authorization list and EIP-1559 gas pricing + Eip7702(EoaSend7702JobData), + /// EIP-1559 transaction with priority fee and max fee per gas + Eip1559(EoaSend1559JobData), + /// Legacy transaction with simple gas price + Legacy(EoaSendLegacyJobData), +} + +/// EIP-7702 transaction configuration +/// Allows delegation of EOA to smart contract logic temporarily +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaSend7702JobData { + /// List of signed authorizations for contract delegation + /// Each authorization allows the EOA to temporarily delegate to a smart contract + #[schemars(with = "Option>")] + #[schema(value_type = Option>)] + pub authorization_list: Option>, + + /// Maximum fee per gas willing to pay (in wei) + /// This is the total fee cap including base fee and priority fee + pub max_fee_per_gas: Option, + + /// Maximum priority fee per gas willing to pay (in wei) + /// This is the tip paid to validators for transaction inclusion + pub max_priority_fee_per_gas: Option, +} + +/// EIP-1559 transaction configuration +/// Uses base fee + priority fee model for more predictable gas pricing +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaSend1559JobData { + /// Maximum fee per gas willing to pay (in wei) + /// This is the total fee cap including base fee and priority fee + pub max_fee_per_gas: Option, + + /// Maximum priority fee per gas willing to pay (in wei) + /// This is the tip paid to validators for transaction inclusion + pub max_priority_fee_per_gas: Option, +} + +/// Legacy transaction configuration +/// Uses simple gas price model (pre-EIP-1559) +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaSendLegacyJobData { + /// Gas price willing to pay (in wei) + /// This is the total price per 
unit of gas for legacy transactions + pub gas_price: Option, +} + +/// EIP-7702 Authorization structure for OpenAPI schema +/// Represents an unsigned authorization that allows an EOA to delegate to a smart contract +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct AuthorizationSchema { + /// The chain ID of the authorization + /// Must match the chain where the transaction will be executed + #[schemars(with = "String")] + #[schema(value_type = String, example = "1")] + pub chain_id: U256, + + /// The smart contract address to delegate to + /// This contract will be able to execute logic on behalf of the EOA + #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] + pub address: Address, + + /// The nonce for the authorization + /// Must be the current nonce of the authorizing account + #[schema(example = 42)] + pub nonce: u64, +} + +/// EIP-7702 Signed Authorization structure for OpenAPI schema +/// Contains an authorization plus the cryptographic signature +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SignedAuthorizationSchema { + /// The chain ID of the authorization + /// Must match the chain where the transaction will be executed + #[schemars(with = "String")] + #[schema(value_type = String, example = "1")] + pub chain_id: U256, + + /// The smart contract address to delegate to + /// This contract will be able to execute logic on behalf of the EOA + #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] + pub address: Address, + + /// The nonce for the authorization + /// Must be the current nonce of the authorizing account + #[schema(example = 42)] + pub nonce: u64, + + /// Signature parity value (0 or 1) + /// Used for ECDSA signature recovery + #[serde(rename = "yParity", alias = "v")] + #[schema(example = 0)] + pub y_parity: u8, + + /// Signature r value + /// First 
component of the ECDSA signature + #[schemars(with = "String")] + #[schema(value_type = String, example = "0x1234567890abcdef...")] + pub r: U256, + + /// Signature s value + /// Second component of the ECDSA signature + #[schemars(with = "String")] + #[schema(value_type = String, example = "0xfedcba0987654321...")] + pub s: U256, +} + +fn default_max_inflight() -> u64 { + 100 +} + +fn default_max_recycled_nonces() -> u64 { + 50 +} diff --git a/core/src/execution_options/mod.rs b/core/src/execution_options/mod.rs index 1f6ae5d..0ee2e18 100644 --- a/core/src/execution_options/mod.rs +++ b/core/src/execution_options/mod.rs @@ -7,6 +7,7 @@ use std::collections::HashMap; use crate::transaction::InnerTransaction; pub mod aa; pub mod auto; +pub mod eoa; pub mod eip7702; // Base execution options for all transactions @@ -37,6 +38,10 @@ pub enum SpecificExecutionOptions { #[schema(title = "ERC-4337 Execution Options")] ERC4337(aa::Erc4337ExecutionOptions), + #[serde(rename = "eoa")] + #[schema(title = "EOA Execution Options")] + EOA(eoa::EoaExecutionOptions), + #[schema(title = "EIP-7702 Execution Options")] EIP7702(eip7702::Eip7702ExecutionOptions), } @@ -122,6 +127,8 @@ pub struct QueuedTransactionsResponse { pub enum ExecutorType { #[serde(rename = "ERC4337")] Erc4337, + #[serde(rename = "EOA")] + Eoa, #[serde(rename = "EIP7702")] Eip7702, } @@ -131,6 +138,7 @@ impl ExecutionOptions { match &self.specific { SpecificExecutionOptions::ERC4337(_) => ExecutorType::Erc4337, SpecificExecutionOptions::Auto(_) => ExecutorType::Erc4337, + SpecificExecutionOptions::EOA(_) => ExecutorType::Eoa, SpecificExecutionOptions::EIP7702(_) => ExecutorType::Eip7702, } } diff --git a/core/src/signer.rs b/core/src/signer.rs index 92799f8..30c22c2 100644 --- a/core/src/signer.rs +++ b/core/src/signer.rs @@ -1,4 +1,5 @@ use alloy::{ + consensus::TypedTransaction, dyn_abi::TypedData, eips::eip7702::SignedAuthorization, hex::FromHex, @@ -179,6 +180,14 @@ pub trait AccountSigner { credentials: 
SigningCredential, ) -> impl std::future::Future> + Send; + /// Sign a transaction + fn sign_transaction( + &self, + options: Self::SigningOptions, + transaction: TypedTransaction, + credentials: SigningCredential, + ) -> impl std::future::Future> + Send; + /// Sign EIP-7702 authorization fn sign_authorization( &self, @@ -309,6 +318,43 @@ impl AccountSigner for EoaSigner { } } + async fn sign_transaction( + &self, + options: EoaSigningOptions, + transaction: TypedTransaction, + credentials: SigningCredential, + ) -> Result { + match credentials { + SigningCredential::Vault(auth_method) => { + let vault_result = self + .vault_client + .sign_transaction(auth_method.clone(), transaction, options.from) + .await + .map_err(|e| { + tracing::error!("Error signing transaction with EOA (Vault): {:?}", e); + e + })?; + + Ok(vault_result.signature) + } + SigningCredential::Iaw { + auth_token, + thirdweb_auth, + } => { + let iaw_result = self + .iaw_client + .sign_transaction(auth_token.clone(), thirdweb_auth.clone(), transaction) + .await + .map_err(|e| { + tracing::error!("Error signing transaction with EOA (IAW): {:?}", e); + EngineError::from(e) + })?; + + Ok(iaw_result.signature) + } + } + } + async fn sign_authorization( &self, options: EoaSigningOptions, @@ -323,7 +369,6 @@ impl AccountSigner for EoaSigner { address, nonce: nonce.to::(), }; - match credentials { SigningCredential::Vault(auth_method) => { let vault_result = self diff --git a/executors/Cargo.toml b/executors/Cargo.toml index 21efa2e..2148d9e 100644 --- a/executors/Cargo.toml +++ b/executors/Cargo.toml @@ -20,3 +20,5 @@ engine-aa-core = { version = "0.1.0", path = "../aa-core" } rand = "0.9.1" uuid = { version = "1.17.0", features = ["v4"] } chrono = "0.4.41" +tokio = { version = "1.45.0", features = ["full"] } +futures = "0.3.31" diff --git a/executors/src/eoa/error_classifier.rs b/executors/src/eoa/error_classifier.rs new file mode 100644 index 0000000..8dd1c80 --- /dev/null +++ 
b/executors/src/eoa/error_classifier.rs @@ -0,0 +1,314 @@ +use alloy::transports::{RpcError, TransportErrorKind}; +use engine_core::{ + chain::Chain, + error::{AlloyRpcErrorToEngineError, EngineError, RpcErrorKind}, +}; +use std::time::Duration; +use twmq::job::RequeuePosition; + +/// Domain-specific EOA execution errors mapped from RPC errors +#[derive(Debug, Clone)] +pub enum EoaExecutionError { + /// Nonce too low - transaction might already be in mempool + NonceTooLow { message: String }, + + /// Nonce too high - indicates nonce gap or desync + NonceTooHigh { message: String }, + + /// Transaction already known in mempool + AlreadyKnown { message: String }, + + /// Replacement transaction underpriced + ReplacementUnderpriced { message: String }, + + /// Insufficient funds for transaction + InsufficientFunds { message: String }, + + /// Gas-related error (limit, estimation, etc.) + GasError { message: String }, + + /// Transaction pool is full or has limits + PoolLimitExceeded { message: String }, + + /// Account does not exist or invalid + AccountError { message: String }, + + /// Network/connectivity issues - use existing handling + RpcError { + message: String, + inner_error: Option, + }, +} + +/// Recovery strategy for an EOA execution error +#[derive(Debug, Clone, PartialEq)] +pub struct RecoveryStrategy { + /// Should we queue confirmation job + pub queue_confirmation: bool, + /// Should we recycle the nonce + pub recycle_nonce: bool, + /// Should we trigger a resync + pub needs_resync: bool, + /// Is this error retryable + pub retryable: bool, + /// Retry delay if retryable + pub retry_delay: Option, +} + +/// Maps RPC errors to domain-specific EOA errors and determines recovery strategies +pub struct EoaErrorMapper; + +impl EoaErrorMapper { + /// Map an RPC error from transaction sending - only handle actionable errors + pub fn map_send_error( + error: &RpcError, + chain: &C, + ) -> Result { + match error { + RpcError::ErrorResp(error_payload) => 
Ok(Self::map_ethereum_error( + error_payload.code, + &error_payload.message, + )), + _ => { + // Use existing engine error handling for non-actionable errors + Err(error.to_engine_error(chain)) + } + } + } + + /// Map Ethereum-specific errors that we need to act on + fn map_ethereum_error(code: i64, message: &str) -> EoaExecutionError { + let msg_lower = message.to_lowercase(); + + match code { + -32000 => { + // Only handle the specific ethereum errors we care about + if msg_lower.contains("nonce too low") { + EoaExecutionError::NonceTooLow { + message: message.to_string(), + } + } else if msg_lower.contains("nonce too high") { + EoaExecutionError::NonceTooHigh { + message: message.to_string(), + } + } else if msg_lower.contains("already known") || msg_lower.contains("duplicate") { + EoaExecutionError::AlreadyKnown { + message: message.to_string(), + } + } else if msg_lower.contains("replacement") && msg_lower.contains("underpriced") { + EoaExecutionError::ReplacementUnderpriced { + message: message.to_string(), + } + } else if msg_lower.contains("insufficient funds") { + EoaExecutionError::InsufficientFunds { + message: message.to_string(), + } + } else if msg_lower.contains("gas") { + EoaExecutionError::GasError { + message: message.to_string(), + } + } else if msg_lower.contains("txpool") || msg_lower.contains("pool limit") { + EoaExecutionError::PoolLimitExceeded { + message: message.to_string(), + } + } else if msg_lower.contains("account") { + EoaExecutionError::AccountError { + message: message.to_string(), + } + } else { + // Not an actionable error - let engine error handle it + EoaExecutionError::RpcError { + message: message.to_string(), + inner_error: Some(EngineError::InternalError { + message: message.to_string(), + }), + } + } + } + _ => { + // Not an actionable error code + EoaExecutionError::RpcError { + message: format!("RPC error code {}: {}", code, message), + inner_error: Some(EngineError::InternalError { + message: message.to_string(), + }), 
+ } + } + } + } + + /// Determine recovery strategy for an EOA execution error + pub fn get_recovery_strategy(error: &EoaExecutionError) -> RecoveryStrategy { + match error { + EoaExecutionError::NonceTooLow { .. } => RecoveryStrategy { + queue_confirmation: true, + recycle_nonce: false, + needs_resync: false, + retryable: false, + retry_delay: None, + }, + + EoaExecutionError::NonceTooHigh { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: true, + retryable: true, + retry_delay: Some(Duration::from_secs(10)), + }, + + EoaExecutionError::AlreadyKnown { .. } => RecoveryStrategy { + queue_confirmation: true, + recycle_nonce: false, + needs_resync: false, + retryable: false, + retry_delay: None, + }, + + EoaExecutionError::ReplacementUnderpriced { .. } => RecoveryStrategy { + queue_confirmation: true, + recycle_nonce: false, + needs_resync: false, + retryable: true, + retry_delay: Some(Duration::from_secs(10)), + }, + + EoaExecutionError::InsufficientFunds { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: false, + retryable: true, + retry_delay: Some(Duration::from_secs(60)), + }, + + EoaExecutionError::GasError { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: false, + retryable: true, + retry_delay: Some(Duration::from_secs(30)), + }, + + EoaExecutionError::PoolLimitExceeded { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: false, + retryable: true, + retry_delay: Some(Duration::from_secs(30)), + }, + + EoaExecutionError::AccountError { .. } => RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: true, + needs_resync: false, + retryable: false, + retry_delay: None, + }, + + EoaExecutionError::RpcError { .. 
} => { + // This should not be used - let engine error handle it + RecoveryStrategy { + queue_confirmation: false, + recycle_nonce: false, + needs_resync: false, + retryable: false, + retry_delay: None, + } + } + } + } +} + +/// Helper for converting mapped errors and recovery strategies to job results +impl EoaExecutionError { + /// Get the message for this error + pub fn message(&self) -> &str { + match self { + EoaExecutionError::NonceTooLow { message } + | EoaExecutionError::NonceTooHigh { message } + | EoaExecutionError::AlreadyKnown { message } + | EoaExecutionError::ReplacementUnderpriced { message } + | EoaExecutionError::InsufficientFunds { message } + | EoaExecutionError::GasError { message } + | EoaExecutionError::PoolLimitExceeded { message } + | EoaExecutionError::AccountError { message } + | EoaExecutionError::RpcError { message, .. } => message, + } + } + + /// Convert to appropriate job result for send operations + pub fn to_send_job_result( + &self, + strategy: &RecoveryStrategy, + success_factory: impl FnOnce() -> T, + error_factory: impl FnOnce(String) -> E, + ) -> twmq::job::JobResult { + use twmq::job::{ToJobError, ToJobResult}; + + if strategy.queue_confirmation { + // Treat as success since we need to check confirmation + Ok(success_factory()) + } else if strategy.retryable { + if let Some(delay) = strategy.retry_delay { + Err(error_factory(self.message().to_string()) + .nack(Some(delay), RequeuePosition::Last)) + } else { + Err(error_factory(self.message().to_string()) + .nack(Some(Duration::from_secs(5)), RequeuePosition::Last)) + } + } else { + // Permanent failure + Err(error_factory(self.message().to_string()).fail()) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_nonce_too_low_mapping() { + let error = EoaErrorMapper::map_ethereum_error(-32000, "nonce too low"); + let strategy = EoaErrorMapper::get_recovery_strategy(&error); + + match error { + EoaExecutionError::NonceTooLow { .. 
} => {} + _ => panic!("Expected NonceTooLow error"), + } + + assert!(strategy.queue_confirmation); + assert!(!strategy.recycle_nonce); + } + + #[test] + fn test_insufficient_funds_mapping() { + let error = EoaErrorMapper::map_ethereum_error( + -32000, + "insufficient funds for gas * price + value", + ); + let strategy = EoaErrorMapper::get_recovery_strategy(&error); + + match error { + EoaExecutionError::InsufficientFunds { .. } => {} + _ => panic!("Expected InsufficientFunds error"), + } + + assert!(!strategy.queue_confirmation); + assert!(strategy.recycle_nonce); + assert!(strategy.retryable); + } + + #[test] + fn test_already_known_mapping() { + let error = EoaErrorMapper::map_ethereum_error(-32000, "already known"); + let strategy = EoaErrorMapper::get_recovery_strategy(&error); + + match error { + EoaExecutionError::AlreadyKnown { .. } => {} + _ => panic!("Expected AlreadyKnown error"), + } + + assert!(strategy.queue_confirmation); + assert!(!strategy.recycle_nonce); + } +} diff --git a/executors/src/eoa/mod.rs b/executors/src/eoa/mod.rs new file mode 100644 index 0000000..c7186f7 --- /dev/null +++ b/executors/src/eoa/mod.rs @@ -0,0 +1,6 @@ +pub mod error_classifier; +pub mod store; +pub mod worker; +pub use error_classifier::{EoaErrorMapper, EoaExecutionError, RecoveryStrategy}; +pub use store::{EoaExecutorStore, EoaTransactionRequest}; +pub use worker::{EoaExecutorWorker, EoaExecutorWorkerJobData}; diff --git a/executors/src/eoa/store.rs b/executors/src/eoa/store.rs new file mode 100644 index 0000000..d3206bf --- /dev/null +++ b/executors/src/eoa/store.rs @@ -0,0 +1,2156 @@ +use alloy::consensus::{Signed, Transaction, TypedTransaction}; +use alloy::network::AnyTransactionReceipt; +use alloy::primitives::{Address, B256, Bytes, U256}; +use chrono; +use engine_core::chain::RpcCredentials; +use engine_core::credentials::SigningCredential; +use engine_core::execution_options::WebhookOptions; +use engine_core::execution_options::eoa::EoaTransactionTypeData; +use 
serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::future::Future; +use twmq::redis::{AsyncCommands, Pipeline, aio::ConnectionManager}; + +pub trait SafeRedisTransaction: Send + Sync { + fn name(&self) -> &str; + fn operation(&self, pipeline: &mut Pipeline); + fn validation( + &self, + conn: &mut ConnectionManager, + ) -> impl Future> + Send; + fn watch_keys(&self) -> Vec; +} + +struct MovePendingToBorrowedWithRecycledNonce { + recycled_key: String, + pending_key: String, + transaction_id: String, + borrowed_key: String, + nonce: u64, + prepared_tx_json: String, +} + +impl SafeRedisTransaction for MovePendingToBorrowedWithRecycledNonce { + fn name(&self) -> &str { + "pending->borrowed with recycled nonce" + } + + fn operation(&self, pipeline: &mut Pipeline) { + // Remove nonce from recycled set (we know it exists) + pipeline.zrem(&self.recycled_key, self.nonce); + // Remove transaction from pending (we know it exists) + pipeline.lrem(&self.pending_key, 0, &self.transaction_id); + // Store borrowed transaction + pipeline.hset( + &self.borrowed_key, + self.nonce.to_string(), + &self.prepared_tx_json, + ); + } + + fn watch_keys(&self) -> Vec { + vec![self.recycled_key.clone(), self.pending_key.clone()] + } + + async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> { + // Check if nonce exists in recycled set + let nonce_score: Option = conn.zscore(&self.recycled_key, self.nonce).await?; + if nonce_score.is_none() { + return Err(TransactionStoreError::NonceNotInRecycledSet { nonce: self.nonce }); + } + + // Check if transaction exists in pending + let pending_transactions: Vec = conn.lrange(&self.pending_key, 0, -1).await?; + if !pending_transactions.contains(&self.transaction_id) { + return Err(TransactionStoreError::TransactionNotInPendingQueue { + transaction_id: self.transaction_id.clone(), + }); + } + + Ok(()) + } +} + +struct MovePendingToBorrowedWithNewNonce { + optimistic_key: String, + pending_key: 
String, + nonce: u64, + prepared_tx_json: String, + transaction_id: String, + borrowed_key: String, + eoa: Address, + chain_id: u64, +} + +impl SafeRedisTransaction for MovePendingToBorrowedWithNewNonce { + fn name(&self) -> &str { + "pending->borrowed with new nonce" + } + + fn operation(&self, pipeline: &mut Pipeline) { + // Increment optimistic nonce + pipeline.incr(&self.optimistic_key, 1); + // Remove transaction from pending + pipeline.lrem(&self.pending_key, 0, &self.transaction_id); + // Store borrowed transaction + pipeline.hset( + &self.borrowed_key, + self.nonce.to_string(), + &self.prepared_tx_json, + ); + } + + fn watch_keys(&self) -> Vec { + vec![self.optimistic_key.clone(), self.pending_key.clone()] + } + + async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> { + // Check current optimistic nonce + let current_optimistic: Option = conn.get(&self.optimistic_key).await?; + let current_nonce = match current_optimistic { + Some(nonce) => nonce, + None => { + return Err(TransactionStoreError::NonceSyncRequired { + eoa: self.eoa, + chain_id: self.chain_id, + }); + } + }; + + if current_nonce != self.nonce { + return Err(TransactionStoreError::OptimisticNonceChanged { + expected: self.nonce, + actual: current_nonce, + }); + } + + // Check if transaction exists in pending + let pending_transactions: Vec = conn.lrange(&self.pending_key, 0, -1).await?; + if !pending_transactions.contains(&self.transaction_id) { + return Err(TransactionStoreError::TransactionNotInPendingQueue { + transaction_id: self.transaction_id.clone(), + }); + } + + Ok(()) + } +} + +struct MoveBorrowedToSubmitted { + nonce: u64, + hash: String, + transaction_id: String, + borrowed_key: String, + submitted_key: String, + hash_to_id_key: String, +} + +impl SafeRedisTransaction for MoveBorrowedToSubmitted { + fn name(&self) -> &str { + "borrowed->submitted" + } + + fn operation(&self, pipeline: &mut Pipeline) { + // Remove from borrowed (we know it 
exists)
+        pipeline.hdel(&self.borrowed_key, self.nonce.to_string());
+
+        // Add to submitted with hash:id format
+        let hash_id_value = format!("{}:{}", self.hash, self.transaction_id);
+        pipeline.zadd(&self.submitted_key, &hash_id_value, self.nonce);
+
+        // Still maintain hash-to-ID mapping for backward compatibility and external lookups
+        pipeline.set(&self.hash_to_id_key, &self.transaction_id);
+    }
+
+    fn watch_keys(&self) -> Vec {
+        vec![self.borrowed_key.clone()]
+    }
+
+    async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> {
+        // Validate that borrowed transaction actually exists
+        // NOTE(review): the type parameter of `Option` appears lost in this copy —
+        // presumably Option<String>; confirm against the original file.
+        let borrowed_tx: Option = conn
+            .hget(&self.borrowed_key, self.nonce.to_string())
+            .await?;
+        if borrowed_tx.is_none() {
+            return Err(TransactionStoreError::TransactionNotInBorrowedState {
+                transaction_id: self.transaction_id.clone(),
+                nonce: self.nonce,
+            });
+        }
+        Ok(())
+    }
+}
+
+/// Atomic state transition: return a borrowed transaction to the pending queue
+/// and recycle its nonce so it can be reused by a later send.
+struct MoveBorrowedToRecycled {
+    nonce: u64,
+    transaction_id: String,
+    borrowed_key: String,
+    recycled_key: String,
+    pending_key: String,
+}
+
+impl SafeRedisTransaction for MoveBorrowedToRecycled {
+    fn name(&self) -> &str {
+        "borrowed->recycled"
+    }
+
+    fn operation(&self, pipeline: &mut Pipeline) {
+        // Remove from borrowed (we know it exists)
+        pipeline.hdel(&self.borrowed_key, self.nonce.to_string());
+
+        // Add nonce to recycled set. NOTE(review): member AND score are both the
+        // nonce (redis-rs `zadd` takes key, member, score), so range queries yield
+        // the lowest recycled nonce first for priority reuse. A previous comment
+        // said "timestamp as score", which did not match the code.
+        pipeline.zadd(&self.recycled_key, self.nonce, self.nonce);
+
+        // Add transaction back to pending
+        pipeline.lpush(&self.pending_key, &self.transaction_id);
+    }
+
+    fn watch_keys(&self) -> Vec {
+        vec![self.borrowed_key.clone()]
+    }
+
+    async fn validation(&self, conn: &mut ConnectionManager) -> Result<(), TransactionStoreError> {
+        // Validate that borrowed transaction actually exists
+        let borrowed_tx: Option = conn
+            .hget(&self.borrowed_key, self.nonce.to_string())
+            .await?;
+        if borrowed_tx.is_none() {
+            return Err(TransactionStoreError::TransactionNotInBorrowedState {
+                transaction_id:
self.transaction_id.clone(), + nonce: self.nonce, + }); + } + Ok(()) + } +} + +/// The actual user request data +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaTransactionRequest { + pub transaction_id: String, + pub chain_id: u64, + + pub from: Address, + pub to: Option
, + pub value: U256, + pub data: Bytes, + + #[serde(alias = "gas")] + pub gas_limit: Option, + + pub webhook_options: Option>, + + pub signing_credential: SigningCredential, + pub rpc_credentials: RpcCredentials, + + #[serde(flatten)] + pub transaction_type_data: Option, +} + +/// Active attempt for a transaction (full alloy transaction + metadata) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TransactionAttempt { + pub transaction_id: String, + pub details: Signed, + pub sent_at: u64, // Unix timestamp in milliseconds + pub attempt_number: u32, +} + +/// Transaction data for a transaction_id +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TransactionData { + pub transaction_id: String, + pub user_request: EoaTransactionRequest, + pub receipt: Option, + pub attempts: Vec, +} + +pub struct BorrowedTransaction { + pub transaction_id: String, + pub data: Signed, + pub borrowed_at: chrono::DateTime, +} + +/// Transaction store focused on transaction_id operations and nonce indexing +pub struct EoaExecutorStore { + pub redis: ConnectionManager, + pub namespace: Option, +} + +impl EoaExecutorStore { + pub fn new(redis: ConnectionManager, namespace: Option) -> Self { + Self { redis, namespace } + } + + /// Name of the key for the transaction data + /// + /// Transaction data is stored as a Redis HSET with the following fields: + /// - "user_request": JSON string containing EoaTransactionRequest + /// - "receipt": JSON string containing AnyTransactionReceipt (optional) + /// - "status": String status ("confirmed", "failed", etc.) 
+    /// - "completed_at": String Unix timestamp (optional)
+    fn transaction_data_key_name(&self, transaction_id: &str) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:tx_data:{transaction_id}"),
+            // Bug fix: this branch previously used the prefix "eoa_tx_data:",
+            // unlike its namespaced twin above and every sibling key builder
+            // (all use "eoa_executor:..."). Align it so namespaced and bare
+            // deployments address one consistent keyspace.
+            None => format!("eoa_executor:tx_data:{transaction_id}"),
+        }
+    }
+
+    /// Name of the list for transaction attempts
+    ///
+    /// Attempts are stored as a separate Redis LIST where each element is a JSON blob
+    /// of a TransactionAttempt. This allows efficient append operations.
+    fn transaction_attempts_list_name(&self, transaction_id: &str) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:tx_attempts:{transaction_id}"),
+            None => format!("eoa_executor:tx_attempts:{transaction_id}"),
+        }
+    }
+
+    /// Name of the list for pending transactions
+    fn pending_transactions_list_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:pending_txs:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:pending_txs:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// Name of the zset for submitted transactions. nonce -> hash:id
+    /// Same transaction might appear multiple times in the zset with different nonces/gas prices (and thus different hashes)
+    fn submitted_transactions_zset_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:submitted_txs:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:submitted_txs:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// Name of the key that maps transaction hash to transaction id
+    fn transaction_hash_to_id_key_name(&self, hash: &str) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:tx_hash_to_id:{hash}"),
+            None => format!("eoa_executor:tx_hash_to_id:{hash}"),
+        }
+    }
+
+    /// Name of the hashmap that maps `transaction_id` -> `BorrowedTransactionData`
+    ///
+    /// This is used for crash recovery.
Before submitting a transaction, we atomically move from pending to this borrowed hashmap.
+    ///
+    /// On worker recovery, if any borrowed transactions are found, we rebroadcast them and move back to pending or submitted
+    ///
+    /// If there's no crash, the happy path moves borrowed transactions back to pending or submitted
+    fn borrowed_transactions_hashmap_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:borrowed_txs:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:borrowed_txs:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// Name of the set that contains recycled nonces.
+    ///
+    /// If a transaction was submitted but failed (i.e. we know with certainty it
+    /// never entered the mempool), we add its nonce to this set.
+    ///
+    /// These nonces are used with priority, before any other nonces.
+    fn recycled_nonces_set_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:recycled_nonces:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:recycled_nonces:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// Optimistic nonce key name.
+    ///
+    /// This is used for optimistic nonce tracking.
+    ///
+    /// Holds the next nonce to assign for this EOA (an optimistic transaction
+    /// count): `MovePendingToBorrowedWithNewNonce` validates that this value
+    /// equals the nonce being assigned, then increments it on success. (An
+    /// earlier comment described it as the nonce of the last successfully sent
+    /// transaction, which contradicts that validation logic.)
+    ///
+    /// !IMPORTANT! When sending a transaction, we use this stored value as the
+    /// assigned nonce, NOT the incremented value.
+    fn optimistic_transaction_count_key_name(&self, eoa: Address, chain_id: u64) -> String {
+        match &self.namespace {
+            Some(ns) => format!("{ns}:eoa_executor:optimistic_nonce:{chain_id}:{eoa}"),
+            None => format!("eoa_executor:optimistic_nonce:{chain_id}:{eoa}"),
+        }
+    }
+
+    /// Name of the key that contains the nonce of the last fetched ONCHAIN transaction count for each EOA.
+    ///
+    /// This is a cache for the actual transaction count, which is fetched from the RPC.
+ /// + /// The nonce for the NEXT transaction is the ONCHAIN transaction count (NOT + 1) + /// + /// Eg: transaction count is 0, so we use nonce 0 for sending the next transaction. Once successful, transaction count will be 1. + fn last_transaction_count_key_name(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:last_tx_nonce:{chain_id}:{eoa}"), + None => format!("eoa_executor:last_tx_nonce:{chain_id}:{eoa}"), + } + } + + /// EOA health key name. + /// + /// EOA health stores: + /// - cached balance, the timestamp of the last balance fetch + /// - timestamp of the last successful transaction confirmation + /// - timestamp of the last 5 nonce resets + fn eoa_health_key_name(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:health:{chain_id}:{eoa}"), + None => format!("eoa_executor:health:{chain_id}:{eoa}"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EoaHealth { + pub balance: U256, + /// Update the balance threshold when we see out of funds errors + pub balance_threshold: U256, + pub balance_fetched_at: u64, + pub last_confirmation_at: u64, + pub last_nonce_movement_at: u64, // Track when nonce last moved for gas bump detection + pub nonce_resets: Vec, // Last 5 reset timestamps +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BorrowedTransactionData { + pub transaction_id: String, + pub signed_transaction: Signed, + pub hash: String, + pub borrowed_at: u64, +} + +/// Type of nonce allocation for transaction processing +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum NonceType { + /// Nonce was recycled from a previously failed transaction + Recycled(u64), + /// Nonce was incremented from the current optimistic counter + Incremented(u64), +} + +impl NonceType { + /// Get the nonce value regardless of type + pub fn nonce(&self) -> u64 { + match self { + NonceType::Recycled(nonce) => 
*nonce, + NonceType::Incremented(nonce) => *nonce, + } + } + + /// Check if this is a recycled nonce + pub fn is_recycled(&self) -> bool { + matches!(self, NonceType::Recycled(_)) + } + + /// Check if this is an incremented nonce + pub fn is_incremented(&self) -> bool { + matches!(self, NonceType::Incremented(_)) + } +} + +impl EoaExecutorStore { + // ========== BOILERPLATE REDUCTION PATTERN ========== + // + // This implementation uses a helper method `execute_with_watch_and_retry` to reduce + // boilerplate in atomic Redis operations. The pattern separates: + // 1. Validation phase: async closure that checks preconditions + // 2. Pipeline phase: sync closure that builds Redis commands + // + // Benefits: + // - Eliminates ~80 lines of boilerplate per method + // - Centralizes retry logic, lock checking, and error handling + // - Makes individual methods focus on business logic + // - Reduces chance of bugs in WATCH/MULTI/EXEC handling + // + // See examples in: + // - atomic_move_pending_to_borrowed_with_recycled_nonce_v2() + // - atomic_move_pending_to_borrowed_with_new_nonce() + // - move_borrowed_to_submitted() + // - move_borrowed_to_recycled() + + /// Aggressively acquire EOA lock, forcefully taking over from stalled workers + pub async fn acquire_eoa_lock_aggressively( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + ) -> Result<(), TransactionStoreError> { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // First try normal acquisition + let acquired: bool = conn.set_nx(&lock_key, worker_id).await?; + if acquired { + return Ok(()); + } + // Lock exists, forcefully take it over + tracing::warn!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + "Forcefully taking over EOA lock from stalled worker" + ); + // Force set - no expiry, only released by explicit takeover + let _: () = conn.set(&lock_key, worker_id).await?; + Ok(()) + } + + /// Release EOA lock following the spec's 
finally pattern + pub async fn release_eoa_lock( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + ) -> Result<(), TransactionStoreError> { + // Use existing utility method that handles all the atomic lock checking + match self + .with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + pipeline.del(&lock_key); + }) + .await + { + Ok(()) => { + tracing::debug!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + "Successfully released EOA lock" + ); + Ok(()) + } + Err(TransactionStoreError::LockLost { .. }) => { + // Lock was already taken over, which is fine for release + tracing::debug!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + "Lock already released or taken over by another worker" + ); + Ok(()) + } + Err(e) => { + // Other errors shouldn't fail the worker, just log + tracing::warn!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + error = %e, + "Failed to release EOA lock" + ); + Ok(()) + } + } + } + + /// Helper to execute atomic operations with proper retry logic and watch handling + /// + /// This helper centralizes all the boilerplate for WATCH/MULTI/EXEC operations: + /// - Retry logic with exponential backoff + /// - Lock ownership validation + /// - WATCH key management + /// - Error handling and UNWATCH cleanup + /// + /// ## Usage: + /// Implement the `SafeRedisTransaction` trait for your operation, then call this method. + /// The trait separates validation (async) from pipeline operations (sync) for clean patterns. 
+ /// + /// ## Example: + /// ```rust + /// let safe_tx = MovePendingToBorrowedWithNewNonce { + /// nonce: expected_nonce, + /// prepared_tx_json, + /// transaction_id, + /// borrowed_key, + /// optimistic_key, + /// pending_key, + /// eoa, + /// chain_id, + /// }; + /// + /// self.execute_with_watch_and_retry(eoa, chain_id, worker_id, &safe_tx).await?; + /// ``` + /// + /// ## When to use this helper: + /// - Operations that implement `SafeRedisTransaction` trait + /// - Need atomic WATCH/MULTI/EXEC with retry logic + /// - Want centralized lock checking and error handling + /// + /// ## When NOT to use this helper: + /// - Simple operations that can use `with_lock_check` instead + /// - Operations that don't need WATCH on multiple keys + /// - Read-only operations that don't modify state + async fn execute_with_watch_and_retry( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + safe_tx: &impl SafeRedisTransaction, + ) -> Result<(), TransactionStoreError> { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let mut retry_count = 0; + + loop { + if retry_count >= MAX_RETRIES { + return Err(TransactionStoreError::InternalError { + message: format!( + "Exceeded max retries ({}) for {} on {}:{}", + MAX_RETRIES, + safe_tx.name(), + eoa, + chain_id + ), + }); + } + + // Exponential backoff after first retry + if retry_count > 0 { + let delay_ms = RETRY_BASE_DELAY_MS * (1 << (retry_count - 1).min(6)); + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + tracing::debug!( + retry_count = retry_count, + delay_ms = delay_ms, + eoa = %eoa, + chain_id = chain_id, + operation = safe_tx.name(), + "Retrying atomic operation" + ); + } + + // WATCH all specified keys including lock + let mut watch_cmd = twmq::redis::cmd("WATCH"); + watch_cmd.arg(&lock_key); + for key in safe_tx.watch_keys() { + watch_cmd.arg(key); + } + let _: () = watch_cmd.query_async(&mut conn).await?; + + // Check lock ownership + 
let current_owner: Option = conn.get(&lock_key).await?; + if current_owner.as_deref() != Some(worker_id) { + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + + // Execute validation + match safe_tx.validation(&mut conn).await { + Ok(()) => { + // Build and execute pipeline + let mut pipeline = twmq::redis::pipe(); + pipeline.atomic(); + safe_tx.operation(&mut pipeline); + + match pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => return Ok(()), // Success + Err(_) => { + // WATCH failed, check if it was our lock + let still_own_lock: Option = conn.get(&lock_key).await?; + if still_own_lock.as_deref() != Some(worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + // State changed, retry + retry_count += 1; + continue; + } + } + } + Err(e) => { + // Validation failed, unwatch and return error + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(e); + } + } + } + } + + /// Example of how to refactor a complex method using the helper to reduce boilerplate + /// This shows the pattern for atomic_move_pending_to_borrowed_with_recycled_nonce + pub async fn atomic_move_pending_to_borrowed_with_recycled_nonce( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + nonce: u64, + prepared_tx: &BorrowedTransactionData, + ) -> Result<(), TransactionStoreError> { + let safe_tx = MovePendingToBorrowedWithRecycledNonce { + recycled_key: self.recycled_nonces_set_name(eoa, chain_id), + pending_key: self.pending_transactions_list_name(eoa, chain_id), + transaction_id: transaction_id.to_string(), + borrowed_key: self.borrowed_transactions_hashmap_name(eoa, chain_id), + nonce, + prepared_tx_json: serde_json::to_string(prepared_tx)?, + }; + + self.execute_with_watch_and_retry(eoa, chain_id, worker_id, 
&safe_tx) + .await?; + + Ok(()) + } + + /// Atomically move specific transaction from pending to borrowed with new nonce allocation + pub async fn atomic_move_pending_to_borrowed_with_new_nonce( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + expected_nonce: u64, + prepared_tx: &BorrowedTransactionData, + ) -> Result<(), TransactionStoreError> { + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let prepared_tx_json = serde_json::to_string(prepared_tx)?; + let transaction_id = transaction_id.to_string(); + + self.execute_with_watch_and_retry( + eoa, + chain_id, + worker_id, + &MovePendingToBorrowedWithNewNonce { + nonce: expected_nonce, + prepared_tx_json, + transaction_id, + borrowed_key, + optimistic_key, + pending_key, + eoa, + chain_id, + }, + ) + .await + } + + /// Generic helper that handles WATCH + retry logic for atomic operations + /// The operation closure receives a mutable connection and should: + /// 1. Perform any validation (return early errors if needed) + /// 2. Build and execute the pipeline + /// 3. 
Return the result + pub async fn with_atomic_operation( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + watch_keys: Vec, + operation_name: &str, + operation: F, + ) -> Result + where + F: Fn(&mut ConnectionManager) -> Fut, + Fut: std::future::Future>, + { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let mut retry_count = 0; + + loop { + if retry_count >= MAX_RETRIES { + return Err(TransactionStoreError::InternalError { + message: format!( + "Exceeded max retries ({}) for {} on {}:{}", + MAX_RETRIES, operation_name, eoa, chain_id + ), + }); + } + + // Exponential backoff after first retry + if retry_count > 0 { + let delay_ms = RETRY_BASE_DELAY_MS * (1 << (retry_count - 1).min(6)); + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + tracing::debug!( + retry_count = retry_count, + delay_ms = delay_ms, + eoa = %eoa, + chain_id = chain_id, + operation = operation_name, + "Retrying atomic operation" + ); + } + + // WATCH all specified keys (lock is always included) + let mut watch_cmd = twmq::redis::cmd("WATCH"); + watch_cmd.arg(&lock_key); + for key in &watch_keys { + watch_cmd.arg(key); + } + let _: () = watch_cmd.query_async(&mut conn).await?; + + // Check if we still own the lock + let current_owner: Option = conn.get(&lock_key).await?; + match current_owner { + Some(owner) if owner == worker_id => { + // We still own it, proceed + } + _ => { + // Lost ownership - immediately fail + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + } + + // Execute operation (includes validation and pipeline execution) + match operation(&mut conn).await { + Ok(result) => return Ok(result), + Err(TransactionStoreError::LockLost { .. 
}) => { + // Lock was lost during operation, propagate immediately + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + Err(TransactionStoreError::WatchFailed) => { + // WATCH failed, check if it was our lock + let still_own_lock: Option = conn.get(&lock_key).await?; + if still_own_lock.as_deref() != Some(worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + // Our lock is fine, retry + retry_count += 1; + continue; + } + Err(other_error) => { + // Other errors propagate immediately (validation failures, etc.) + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(other_error); + } + } + } + } + + /// Wrapper that executes operations with lock validation using WATCH/MULTI/EXEC + pub async fn with_lock_check( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + operation: F, + ) -> Result + where + F: Fn(&mut Pipeline) -> R, + T: From, + { + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let mut retry_count = 0; + + loop { + if retry_count >= MAX_RETRIES { + return Err(TransactionStoreError::InternalError { + message: format!( + "Exceeded max retries ({}) for lock check on {}:{}", + MAX_RETRIES, eoa, chain_id + ), + }); + } + + // Exponential backoff after first retry + if retry_count > 0 { + let delay_ms = RETRY_BASE_DELAY_MS * (1 << (retry_count - 1).min(6)); + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + tracing::debug!( + retry_count = retry_count, + delay_ms = delay_ms, + eoa = %eoa, + chain_id = chain_id, + "Retrying lock check operation" + ); + } + + // WATCH the EOA lock + let _: () = twmq::redis::cmd("WATCH") + .arg(&lock_key) + .query_async(&mut conn) + .await?; + + // Check if we still own the lock + let current_owner: Option = conn.get(&lock_key).await?; + match current_owner { + Some(owner) if owner == 
worker_id => { + // We still own it, proceed + } + _ => { + // Lost ownership - immediately fail + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + } + + // Build pipeline with operation + let mut pipeline = twmq::redis::pipe(); + pipeline.atomic(); + let result = operation(&mut pipeline); + + // Execute with WATCH protection + match pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => return Ok(T::from(result)), + Err(_) => { + // WATCH failed, check if it was our lock or someone else's + let still_own_lock: Option = conn.get(&lock_key).await?; + if still_own_lock.as_deref() != Some(worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + // Our lock is fine, someone else's WATCH failed - retry + retry_count += 1; + continue; + } + } + } + } + + // ========== ATOMIC OPERATIONS ========== + + /// Peek all borrowed transactions without removing them + pub async fn peek_borrowed_transactions( + &self, + eoa: Address, + chain_id: u64, + ) -> Result, TransactionStoreError> { + let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let borrowed_map: HashMap = conn.hgetall(&borrowed_key).await?; + let mut result = Vec::new(); + + for (_nonce_str, transaction_json) in borrowed_map { + let borrowed_data: BorrowedTransactionData = serde_json::from_str(&transaction_json)?; + result.push(borrowed_data); + } + + Ok(result) + } + + /// Atomically move borrowed transaction to submitted state + /// Returns error if transaction not found in borrowed state + pub async fn move_borrowed_to_submitted( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + nonce: u64, + hash: &str, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + let borrowed_key = 
self.borrowed_transactions_hashmap_name(eoa, chain_id); + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); + let hash = hash.to_string(); + let transaction_id = transaction_id.to_string(); + + self.execute_with_watch_and_retry( + eoa, + chain_id, + worker_id, + &MoveBorrowedToSubmitted { + nonce, + hash: hash.to_string(), + transaction_id, + borrowed_key, + submitted_key, + hash_to_id_key, + }, + ) + .await + } + + /// Atomically move borrowed transaction back to recycled nonces and pending queue + /// Returns error if transaction not found in borrowed state + pub async fn move_borrowed_to_recycled( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + nonce: u64, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + let borrowed_key = self.borrowed_transactions_hashmap_name(eoa, chain_id); + let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let transaction_id = transaction_id.to_string(); + + self.execute_with_watch_and_retry( + eoa, + chain_id, + worker_id, + &MoveBorrowedToRecycled { + nonce, + transaction_id, + borrowed_key, + recycled_key, + pending_key, + }, + ) + .await + } + + /// Get all hashes below a certain nonce from submitted transactions + /// Returns (nonce, hash, transaction_id) tuples + pub async fn get_hashes_below_nonce( + &self, + eoa: Address, + chain_id: u64, + below_nonce: u64, + ) -> Result, TransactionStoreError> { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // Get all entries with nonce < below_nonce + let results: Vec<(String, u64)> = conn + .zrangebyscore_withscores(&submitted_key, 0, below_nonce - 1) + .await?; + + let mut parsed_results = Vec::new(); + for (hash_id_value, nonce) in results { + // Parse hash:id format + if let Some((hash, transaction_id)) = 
hash_id_value.split_once(':') { + parsed_results.push((nonce, hash.to_string(), transaction_id.to_string())); + } else { + // Fallback for old format (just hash) - look up transaction ID + if let Some(transaction_id) = + self.get_transaction_id_for_hash(&hash_id_value).await? + { + parsed_results.push((nonce, hash_id_value, transaction_id)); + } + } + } + + Ok(parsed_results) + } + + /// Get all transaction IDs for a specific nonce + pub async fn get_transaction_ids_for_nonce( + &self, + eoa: Address, + chain_id: u64, + nonce: u64, + ) -> Result, TransactionStoreError> { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // Get all members with the exact nonce + let members: Vec = conn + .zrangebyscore(&submitted_key, nonce, nonce) + .await + .map_err(|e| TransactionStoreError::RedisError { + message: format!("Failed to get transaction IDs for nonce {}: {}", nonce, e), + })?; + + let mut transaction_ids = Vec::new(); + for value in members { + // Parse the value as hash:id format, with fallback to old format + if let Some((_, transaction_id)) = value.split_once(':') { + // New format: hash:id + transaction_ids.push(transaction_id.to_string()); + } else { + // Old format: just hash - look up transaction ID + if let Some(transaction_id) = self.get_transaction_id_for_hash(&value).await? { + transaction_ids.push(transaction_id); + } + } + } + + Ok(transaction_ids) + } + + /// Remove all hashes for a transaction and requeue it + /// Returns error if no hashes found for this transaction in submitted state + /// NOTE: This method keeps the original boilerplate pattern because it needs to pass + /// complex data (transaction_hashes) from validation to pipeline phase. + /// The helper pattern works best for simple validation that doesn't need to pass data. 
+ pub async fn fail_and_requeue_transaction( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let lock_key = self.eoa_lock_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let mut retry_count = 0; + + loop { + if retry_count >= MAX_RETRIES { + return Err(TransactionStoreError::InternalError { + message: format!( + "Exceeded max retries ({}) for fail and requeue transaction {}:{} tx:{}", + MAX_RETRIES, eoa, chain_id, transaction_id + ), + }); + } + + if retry_count > 0 { + let delay_ms = RETRY_BASE_DELAY_MS * (1 << (retry_count - 1).min(6)); + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + } + + // WATCH lock and submitted state + let _: () = twmq::redis::cmd("WATCH") + .arg(&lock_key) + .arg(&submitted_key) + .query_async(&mut conn) + .await?; + + // Check lock ownership + let current_owner: Option = conn.get(&lock_key).await?; + if current_owner.as_deref() != Some(worker_id) { + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + + // Find all hashes for this transaction that actually exist in submitted + let all_hash_id_values: Vec = conn.zrange(&submitted_key, 0, -1).await?; + let mut transaction_hashes = Vec::new(); + + for hash_id_value in all_hash_id_values { + // Parse hash:id format + if let Some((hash, tx_id)) = hash_id_value.split_once(':') { + if tx_id == transaction_id { + transaction_hashes.push(hash.to_string()); + } + } else { + // Fallback for old format (just hash) - look up transaction ID + if let Some(tx_id) = self.get_transaction_id_for_hash(&hash_id_value).await? 
{ + if tx_id == transaction_id { + transaction_hashes.push(hash_id_value); + } + } + } + } + + if transaction_hashes.is_empty() { + let _: () = twmq::redis::cmd("UNWATCH").query_async(&mut conn).await?; + return Err(TransactionStoreError::TransactionNotInSubmittedState { + transaction_id: transaction_id.to_string(), + }); + } + + // Transaction has hashes in submitted, proceed with atomic removal and requeue + let mut pipeline = twmq::redis::pipe(); + pipeline.atomic(); + + // Remove all hash:id values for this transaction (we know they exist) + for hash in &transaction_hashes { + // Remove the hash:id value from the zset + let hash_id_value = format!("{}:{}", hash, transaction_id); + pipeline.zrem(&submitted_key, &hash_id_value); + + // Also remove the separate hash-to-ID mapping for backward compatibility + let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); + pipeline.del(&hash_to_id_key); + } + + // Add back to pending + pipeline.lpush(&pending_key, transaction_id); + + match pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => return Ok(()), // Success + Err(_) => { + // WATCH failed, check if it was our lock + let still_own_lock: Option = conn.get(&lock_key).await?; + if still_own_lock.as_deref() != Some(worker_id) { + return Err(TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.to_string(), + }); + } + // Submitted state changed, retry + retry_count += 1; + continue; + } + } + } + } + + /// Check EOA health (balance, etc.) 
+ pub async fn check_eoa_health( + &self, + eoa: Address, + chain_id: u64, + ) -> Result, TransactionStoreError> { + let health_key = self.eoa_health_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let health_json: Option = conn.get(&health_key).await?; + if let Some(json) = health_json { + let health: EoaHealth = serde_json::from_str(&json)?; + Ok(Some(health)) + } else { + Ok(None) + } + } + + /// Update EOA health data + pub async fn update_health_data( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + health: &EoaHealth, + ) -> Result<(), TransactionStoreError> { + let health_json = serde_json::to_string(health)?; + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let health_key = self.eoa_health_key_name(eoa, chain_id); + pipeline.set(&health_key, &health_json); + }) + .await + } + + /// Update cached transaction count + pub async fn update_cached_transaction_count( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_count: u64, + ) -> Result<(), TransactionStoreError> { + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); + pipeline.set(&tx_count_key, transaction_count); + }) + .await + } + + /// Peek recycled nonces without removing them + pub async fn peek_recycled_nonces( + &self, + eoa: Address, + chain_id: u64, + ) -> Result, TransactionStoreError> { + let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let nonces: Vec = conn.zrange(&recycled_key, 0, -1).await?; + Ok(nonces) + } + + /// Peek at pending transactions without removing them (safe for planning) + pub async fn peek_pending_transactions( + &self, + eoa: Address, + chain_id: u64, + limit: u64, + ) -> Result, TransactionStoreError> { + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // Use LRANGE to peek without removing + let 
transaction_ids: Vec = + conn.lrange(&pending_key, 0, (limit as isize) - 1).await?; + Ok(transaction_ids) + } + + /// Get inflight budget (how many new transactions can be sent) + pub async fn get_inflight_budget( + &self, + eoa: Address, + chain_id: u64, + max_inflight: u64, + ) -> Result { + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let last_tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + // Read both values atomically to avoid race conditions + let (optimistic_nonce, last_tx_count): (Option, Option) = twmq::redis::pipe() + .get(&optimistic_key) + .get(&last_tx_count_key) + .query_async(&mut conn) + .await?; + + let optimistic = match optimistic_nonce { + Some(nonce) => nonce, + None => return Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + }; + let last_count = match last_tx_count { + Some(count) => count, + None => return Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + }; + + let current_inflight = optimistic.saturating_sub(last_count); + let available_budget = max_inflight.saturating_sub(current_inflight); + + Ok(available_budget) + } + + /// Get current optimistic nonce (without incrementing) + pub async fn get_optimistic_nonce( + &self, + eoa: Address, + chain_id: u64, + ) -> Result { + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let current: Option = conn.get(&optimistic_key).await?; + match current { + Some(nonce) => Ok(nonce), + None => Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + } + } + + /// Lock key name for EOA processing + fn eoa_lock_key_name(&self, eoa: Address, chain_id: u64) -> String { + match &self.namespace { + Some(ns) => format!("{ns}:eoa_executor:lock:{chain_id}:{eoa}"), + None => format!("eoa_executor:lock:{chain_id}:{eoa}"), + } + } + + /// Get transaction ID for a given hash + pub async fn 
get_transaction_id_for_hash( + &self, + hash: &str, + ) -> Result, TransactionStoreError> { + let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); + let mut conn = self.redis.clone(); + + let transaction_id: Option = conn.get(&hash_to_id_key).await?; + Ok(transaction_id) + } + + /// Get transaction data by transaction ID + pub async fn get_transaction_data( + &self, + transaction_id: &str, + ) -> Result, TransactionStoreError> { + let tx_data_key = self.transaction_data_key_name(transaction_id); + let mut conn = self.redis.clone(); + + // Get the hash data (the transaction data is stored as a hash) + let hash_data: HashMap = conn.hgetall(&tx_data_key).await?; + + if hash_data.is_empty() { + return Ok(None); + } + + // Extract user_request from the hash data + let user_request_json = hash_data.get("user_request").ok_or_else(|| { + TransactionStoreError::TransactionNotFound { + transaction_id: transaction_id.to_string(), + } + })?; + + let user_request: EoaTransactionRequest = serde_json::from_str(user_request_json)?; + + // Extract receipt if present + let receipt = hash_data + .get("receipt") + .and_then(|receipt_str| serde_json::from_str(receipt_str).ok()); + + // Extract attempts from separate list + let attempts_key = self.transaction_attempts_list_name(transaction_id); + let attempts_json_list: Vec = conn.lrange(&attempts_key, 0, -1).await?; + let mut attempts = Vec::new(); + for attempt_json in attempts_json_list { + if let Ok(attempt) = serde_json::from_str::(&attempt_json) { + attempts.push(attempt); + } + } + + Ok(Some(TransactionData { + transaction_id: transaction_id.to_string(), + user_request, + receipt, + attempts, + })) + } + + /// Mark transaction as successful and remove from submitted + pub async fn succeed_transaction( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + hash: &str, + receipt: &str, + ) -> Result<(), TransactionStoreError> { + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { 
+ let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let hash_to_id_key = self.transaction_hash_to_id_key_name(hash); + let tx_data_key = self.transaction_data_key_name(transaction_id); + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + // Remove this hash:id from submitted + let hash_id_value = format!("{}:{}", hash, transaction_id); + pipeline.zrem(&submitted_key, &hash_id_value); + + // Remove hash mapping + pipeline.del(&hash_to_id_key); + + // Update transaction data with success + pipeline.hset(&tx_data_key, "completed_at", now); + pipeline.hset(&tx_data_key, "receipt", receipt); + pipeline.hset(&tx_data_key, "status", "confirmed"); + }) + .await + } + + /// Add a gas bump attempt (new hash) to submitted transactions + pub async fn add_gas_bump_attempt( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + transaction_id: &str, + signed_transaction: Signed, + ) -> Result<(), TransactionStoreError> { + let new_hash = signed_transaction.hash().to_string(); + let nonce = signed_transaction.nonce(); + + // Create new attempt + let new_attempt = TransactionAttempt { + transaction_id: transaction_id.to_string(), + details: signed_transaction, + sent_at: chrono::Utc::now().timestamp_millis().max(0) as u64, + attempt_number: 0, // Will be set correctly when reading all attempts + }; + + // Serialize the new attempt + let attempt_json = serde_json::to_string(&new_attempt)?; + + // Get key names + let attempts_list_key = self.transaction_attempts_list_name(transaction_id); + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let hash_to_id_key = self.transaction_hash_to_id_key_name(&new_hash); + let hash_id_value = format!("{}:{}", new_hash, transaction_id); + + // Now perform the atomic update + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + // Add new hash:id to submitted (keeping old ones) + pipeline.zadd(&submitted_key, &hash_id_value, nonce); + + // Still maintain separate 
hash-to-ID mapping for backward compatibility + pipeline.set(&hash_to_id_key, transaction_id); + + // Simply push the new attempt to the attempts list + pipeline.lpush(&attempts_list_key, &attempt_json); + }) + .await + } + + /// Efficiently batch fail and requeue multiple transactions + /// This avoids hash-to-ID lookups since we already have both pieces of information + pub async fn batch_fail_and_requeue_transactions( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + failures: Vec, + ) -> Result<(), TransactionStoreError> { + if failures.is_empty() { + return Ok(()); + } + + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + + // Remove all hash:id values from submitted + for failure in &failures { + let hash_id_value = format!("{}:{}", failure.hash, failure.transaction_id); + pipeline.zrem(&submitted_key, &hash_id_value); + + // Remove separate hash-to-ID mapping + let hash_to_id_key = self.transaction_hash_to_id_key_name(&failure.hash); + pipeline.del(&hash_to_id_key); + } + + // Add unique transaction IDs back to pending (avoid duplicates) + let mut unique_tx_ids = std::collections::HashSet::new(); + for failure in &failures { + unique_tx_ids.insert(&failure.transaction_id); + } + + for transaction_id in unique_tx_ids { + pipeline.lpush(&pending_key, transaction_id); + } + }) + .await + } + + /// Efficiently batch succeed multiple transactions + /// This avoids hash-to-ID lookups since we already have both pieces of information + pub async fn batch_succeed_transactions( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + successes: Vec, + ) -> Result<(), TransactionStoreError> { + if successes.is_empty() { + return Ok(()); + } + + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let submitted_key = self.submitted_transactions_zset_name(eoa, chain_id); + let now = 
chrono::Utc::now().timestamp_millis().max(0) as u64; + + for success in &successes { + // Remove hash:id from submitted + let hash_id_value = format!("{}:{}", success.hash, success.transaction_id); + pipeline.zrem(&submitted_key, &hash_id_value); + + // Remove separate hash-to-ID mapping + let hash_to_id_key = self.transaction_hash_to_id_key_name(&success.hash); + pipeline.del(&hash_to_id_key); + + // Update transaction data with success (following existing Redis hash pattern) + let tx_data_key = self.transaction_data_key_name(&success.transaction_id); + pipeline.hset(&tx_data_key, "completed_at", now); + pipeline.hset(&tx_data_key, "receipt", &success.receipt_data); + pipeline.hset(&tx_data_key, "status", "confirmed"); + } + }) + .await + } + + // ========== SEND FLOW ========== + + /// Get cached transaction count + pub async fn get_cached_transaction_count( + &self, + eoa: Address, + chain_id: u64, + ) -> Result { + let tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + + let count: Option = conn.get(&tx_count_key).await?; + match count { + Some(count) => Ok(count), + None => Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + } + } + + /// Peek next available nonce (recycled or new) + pub async fn peek_next_available_nonce( + &self, + eoa: Address, + chain_id: u64, + ) -> Result { + // Check recycled nonces first + let recycled = self.peek_recycled_nonces(eoa, chain_id).await?; + if !recycled.is_empty() { + return Ok(NonceType::Recycled(recycled[0])); + } + + // Get next optimistic nonce + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let mut conn = self.redis.clone(); + let current_optimistic: Option = conn.get(&optimistic_key).await?; + + match current_optimistic { + Some(nonce) => Ok(NonceType::Incremented(nonce)), + None => Err(TransactionStoreError::NonceSyncRequired { eoa, chain_id }), + } + } + + /// Synchronize nonces with the chain + /// + /// 
Part of standard nonce management flow, called in the confirm stage when chain nonce advances, and we need to update our cached nonce + pub async fn synchronize_nonces_with_chain( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + current_chain_tx_count: u64, + ) -> Result<(), TransactionStoreError> { + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + // First, read current health data + let current_health = self.check_eoa_health(eoa, chain_id).await?; + + // Prepare health update if health data exists + let health_update = if let Some(mut health) = current_health { + health.last_nonce_movement_at = now; + health.last_confirmation_at = now; + Some(serde_json::to_string(&health)?) + } else { + None + }; + + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let tx_count_key = self.last_transaction_count_key_name(eoa, chain_id); + + // Update cached transaction count + pipeline.set(&tx_count_key, current_chain_tx_count); + + // Update health data only if it exists + if let Some(ref health_json) = health_update { + let health_key = self.eoa_health_key_name(eoa, chain_id); + pipeline.set(&health_key, health_json); + } + }) + .await + } + + /// Reset nonces to specified value + /// + /// This is called when we have too many recycled nonces and detect something wrong + /// We want to start fresh, with the chain nonce as the new optimistic nonce + pub async fn reset_nonces( + &self, + eoa: Address, + chain_id: u64, + worker_id: &str, + current_chain_tx_count: u64, + ) -> Result<(), TransactionStoreError> { + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + let current_health = self.check_eoa_health(eoa, chain_id).await?; + + // Prepare health update if health data exists + let health_update = if let Some(mut health) = current_health { + health.nonce_resets.push(now); + Some(serde_json::to_string(&health)?) 
+ } else { + None + }; + + self.with_lock_check(eoa, chain_id, worker_id, |pipeline| { + let optimistic_key = self.optimistic_transaction_count_key_name(eoa, chain_id); + let cached_nonce_key = self.last_transaction_count_key_name(eoa, chain_id); + let recycled_key = self.recycled_nonces_set_name(eoa, chain_id); + + // Update health data only if it exists + if let Some(ref health_json) = health_update { + let health_key = self.eoa_health_key_name(eoa, chain_id); + pipeline.set(&health_key, health_json); + } + + // Reset the optimistic nonce + pipeline.set(&optimistic_key, current_chain_tx_count); + + // Reset the cached nonce + pipeline.set(&cached_nonce_key, current_chain_tx_count); + + // Reset the recycled nonces + pipeline.del(recycled_key); + }) + .await + } + + /// Add a transaction to the pending queue and store its data + /// This is called when a new transaction request comes in for an EOA + pub async fn add_transaction( + &self, + transaction_request: EoaTransactionRequest, + ) -> Result<(), TransactionStoreError> { + let transaction_id = &transaction_request.transaction_id; + let eoa = transaction_request.from; + let chain_id = transaction_request.chain_id; + + let tx_data_key = self.transaction_data_key_name(transaction_id); + let pending_key = self.pending_transactions_list_name(eoa, chain_id); + + // Store transaction data as JSON in the user_request field of the hash + let user_request_json = serde_json::to_string(&transaction_request)?; + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + let mut conn = self.redis.clone(); + + // Use a pipeline to atomically store data and add to pending queue + let mut pipeline = twmq::redis::pipe(); + + // Store transaction data + pipeline.hset(&tx_data_key, "user_request", &user_request_json); + pipeline.hset(&tx_data_key, "status", "pending"); + pipeline.hset(&tx_data_key, "created_at", now); + + // Add to pending queue + pipeline.lpush(&pending_key, transaction_id); + + 
pipeline.query_async::<()>(&mut conn).await?; + + Ok(()) + } +} + +// Additional error types +#[derive(Debug, thiserror::Error, Serialize, Deserialize, Clone)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] +pub enum TransactionStoreError { + #[error("Redis error: {message}")] + RedisError { message: String }, + + #[error("Serialization error: {message}")] + DeserError { message: String, text: String }, + + #[error("Transaction not found: {transaction_id}")] + TransactionNotFound { transaction_id: String }, + + #[error("Lost EOA lock: {eoa}:{chain_id} worker: {worker_id}")] + LockLost { + eoa: Address, + chain_id: u64, + worker_id: String, + }, + + #[error("Internal error - worker should quit: {message}")] + InternalError { message: String }, + + #[error("Transaction {transaction_id} not in borrowed state for nonce {nonce}")] + TransactionNotInBorrowedState { transaction_id: String, nonce: u64 }, + + #[error("Hash {hash} not found in submitted transactions")] + HashNotInSubmittedState { hash: String }, + + #[error("Transaction {transaction_id} has no hashes in submitted state")] + TransactionNotInSubmittedState { transaction_id: String }, + + #[error("Nonce {nonce} not available in recycled set")] + NonceNotInRecycledSet { nonce: u64 }, + + #[error("Transaction {transaction_id} not found in pending queue")] + TransactionNotInPendingQueue { transaction_id: String }, + + #[error("Optimistic nonce changed: expected {expected}, found {actual}")] + OptimisticNonceChanged { expected: u64, actual: u64 }, + + #[error("WATCH failed - state changed during operation")] + WatchFailed, + + #[error( + "Nonce synchronization required for {eoa}:{chain_id} - no cached transaction count available" + )] + NonceSyncRequired { eoa: Address, chain_id: u64 }, +} + +impl From for TransactionStoreError { + fn from(error: twmq::redis::RedisError) -> Self { + TransactionStoreError::RedisError { + message: error.to_string(), + } + } +} + +impl From for TransactionStoreError 
{ + fn from(error: serde_json::Error) -> Self { + TransactionStoreError::DeserError { + message: error.to_string(), + text: error.to_string(), + } + } +} + +const MAX_RETRIES: u32 = 10; +const RETRY_BASE_DELAY_MS: u64 = 10; + +/// Scoped transaction store for a specific EOA, chain, and worker +/// +/// This wrapper eliminates the need to repeatedly pass EOA, chain_id, and worker_id +/// to every method call. It provides the same interface as TransactionStore but with +/// these parameters already bound. +/// +/// ## Usage: +/// ```rust +/// let scoped = ScopedTransactionStore::build(store, eoa, chain_id, worker_id).await?; +/// +/// // Much cleaner method calls: +/// scoped.peek_pending_transactions(limit).await?; +/// scoped.move_borrowed_to_submitted(nonce, hash, tx_id, attempt).await?; +/// ``` +pub struct ScopedEoaExecutorStore<'a> { + store: &'a EoaExecutorStore, + eoa: Address, + chain_id: u64, + worker_id: String, +} + +impl<'a> ScopedEoaExecutorStore<'a> { + /// Build a scoped transaction store for a specific EOA, chain, and worker + /// + /// This acquires the lock for the given EOA/chain. + /// If the lock is not acquired, returns a LockLost error. + #[tracing::instrument(skip_all, fields(eoa = %eoa, chain_id = chain_id, worker_id = %worker_id))] + pub async fn build( + store: &'a EoaExecutorStore, + eoa: Address, + chain_id: u64, + worker_id: String, + ) -> Result { + // 1. 
ACQUIRE LOCK AGGRESSIVELY + tracing::info!("Acquiring EOA lock aggressively"); + store + .acquire_eoa_lock_aggressively(eoa, chain_id, &worker_id) + .await + .map_err(|e| { + tracing::error!("Failed to acquire EOA lock: {}", e); + TransactionStoreError::LockLost { + eoa, + chain_id, + worker_id: worker_id.clone(), + } + })?; + + Ok(Self { + store, + eoa, + chain_id, + worker_id, + }) + } + + /// Create a scoped store without lock validation (for read-only operations) + pub fn new_unchecked( + store: &'a EoaExecutorStore, + eoa: Address, + chain_id: u64, + worker_id: String, + ) -> Self { + Self { + store, + eoa, + chain_id, + worker_id, + } + } + + // ========== ATOMIC OPERATIONS ========== + + /// Atomically move specific transaction from pending to borrowed with recycled nonce allocation + pub async fn atomic_move_pending_to_borrowed_with_recycled_nonce( + &self, + transaction_id: &str, + nonce: u64, + prepared_tx: &BorrowedTransactionData, + ) -> Result<(), TransactionStoreError> { + self.store + .atomic_move_pending_to_borrowed_with_recycled_nonce( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_id, + nonce, + prepared_tx, + ) + .await + } + + /// Atomically move specific transaction from pending to borrowed with new nonce allocation + pub async fn atomic_move_pending_to_borrowed_with_new_nonce( + &self, + transaction_id: &str, + expected_nonce: u64, + prepared_tx: &BorrowedTransactionData, + ) -> Result<(), TransactionStoreError> { + self.store + .atomic_move_pending_to_borrowed_with_new_nonce( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_id, + expected_nonce, + prepared_tx, + ) + .await + } + + /// Peek all borrowed transactions without removing them + pub async fn peek_borrowed_transactions( + &self, + ) -> Result, TransactionStoreError> { + self.store + .peek_borrowed_transactions(self.eoa, self.chain_id) + .await + } + + /// Atomically move borrowed transaction to submitted state + pub async fn move_borrowed_to_submitted( + 
&self, + nonce: u64, + hash: &str, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + self.store + .move_borrowed_to_submitted( + self.eoa, + self.chain_id, + &self.worker_id, + nonce, + hash, + transaction_id, + ) + .await + } + + /// Atomically move borrowed transaction back to recycled nonces and pending queue + pub async fn move_borrowed_to_recycled( + &self, + nonce: u64, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + self.store + .move_borrowed_to_recycled( + self.eoa, + self.chain_id, + &self.worker_id, + nonce, + transaction_id, + ) + .await + } + + /// Get all hashes below a certain nonce from submitted transactions + /// Returns (nonce, hash, transaction_id) tuples + pub async fn get_hashes_below_nonce( + &self, + below_nonce: u64, + ) -> Result, TransactionStoreError> { + self.store + .get_hashes_below_nonce(self.eoa, self.chain_id, below_nonce) + .await + } + + /// Get all transaction IDs for a specific nonce + pub async fn get_transaction_ids_for_nonce( + &self, + nonce: u64, + ) -> Result, TransactionStoreError> { + self.store + .get_transaction_ids_for_nonce(self.eoa, self.chain_id, nonce) + .await + } + + /// Remove all hashes for a transaction and requeue it + pub async fn fail_and_requeue_transaction( + &self, + transaction_id: &str, + ) -> Result<(), TransactionStoreError> { + self.store + .fail_and_requeue_transaction(self.eoa, self.chain_id, &self.worker_id, transaction_id) + .await + } + + /// Efficiently batch fail and requeue multiple transactions + pub async fn batch_fail_and_requeue_transactions( + &self, + failures: Vec, + ) -> Result<(), TransactionStoreError> { + self.store + .batch_fail_and_requeue_transactions(self.eoa, self.chain_id, &self.worker_id, failures) + .await + } + + /// Efficiently batch succeed multiple transactions + pub async fn batch_succeed_transactions( + &self, + successes: Vec, + ) -> Result<(), TransactionStoreError> { + self.store + .batch_succeed_transactions(self.eoa, 
self.chain_id, &self.worker_id, successes) + .await + } + + // ========== EOA HEALTH & NONCE MANAGEMENT ========== + + /// Check EOA health (balance, etc.) + pub async fn check_eoa_health(&self) -> Result, TransactionStoreError> { + self.store.check_eoa_health(self.eoa, self.chain_id).await + } + + /// Update EOA health data + pub async fn update_health_data( + &self, + health: &EoaHealth, + ) -> Result<(), TransactionStoreError> { + self.store + .update_health_data(self.eoa, self.chain_id, &self.worker_id, health) + .await + } + + /// Update cached transaction count + pub async fn update_cached_transaction_count( + &self, + transaction_count: u64, + ) -> Result<(), TransactionStoreError> { + self.store + .update_cached_transaction_count( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_count, + ) + .await + } + + /// Peek recycled nonces without removing them + pub async fn peek_recycled_nonces(&self) -> Result, TransactionStoreError> { + self.store + .peek_recycled_nonces(self.eoa, self.chain_id) + .await + } + + /// Peek at pending transactions without removing them + pub async fn peek_pending_transactions( + &self, + limit: u64, + ) -> Result, TransactionStoreError> { + self.store + .peek_pending_transactions(self.eoa, self.chain_id, limit) + .await + } + + /// Get inflight budget (how many new transactions can be sent) + pub async fn get_inflight_budget( + &self, + max_inflight: u64, + ) -> Result { + self.store + .get_inflight_budget(self.eoa, self.chain_id, max_inflight) + .await + } + + /// Get current optimistic nonce (without incrementing) + pub async fn get_optimistic_nonce(&self) -> Result { + self.store + .get_optimistic_nonce(self.eoa, self.chain_id) + .await + } + + /// Mark transaction as successful and remove from submitted + pub async fn succeed_transaction( + &self, + transaction_id: &str, + hash: &str, + receipt: &str, + ) -> Result<(), TransactionStoreError> { + self.store + .succeed_transaction( + self.eoa, + self.chain_id, + 
&self.worker_id, + transaction_id, + hash, + receipt, + ) + .await + } + + /// Add a gas bump attempt (new hash) to submitted transactions + pub async fn add_gas_bump_attempt( + &self, + transaction_id: &str, + signed_transaction: Signed, + ) -> Result<(), TransactionStoreError> { + self.store + .add_gas_bump_attempt( + self.eoa, + self.chain_id, + &self.worker_id, + transaction_id, + signed_transaction, + ) + .await + } + + pub async fn synchronize_nonces_with_chain( + &self, + nonce: u64, + ) -> Result<(), TransactionStoreError> { + self.store + .synchronize_nonces_with_chain(self.eoa, self.chain_id, &self.worker_id, nonce) + .await + } + + pub async fn reset_nonces(&self, nonce: u64) -> Result<(), TransactionStoreError> { + self.store + .reset_nonces(self.eoa, self.chain_id, &self.worker_id, nonce) + .await + } + + // ========== READ-ONLY OPERATIONS ========== + + /// Get cached transaction count + pub async fn get_cached_transaction_count(&self) -> Result { + self.store + .get_cached_transaction_count(self.eoa, self.chain_id) + .await + } + + /// Peek next available nonce (recycled or new) + pub async fn peek_next_available_nonce(&self) -> Result { + self.store + .peek_next_available_nonce(self.eoa, self.chain_id) + .await + } + + // ========== ACCESSORS ========== + + /// Get the EOA address this store is scoped to + pub fn eoa(&self) -> Address { + self.eoa + } + + /// Get the chain ID this store is scoped to + pub fn chain_id(&self) -> u64 { + self.chain_id + } + + /// Get the worker ID this store is scoped to + pub fn worker_id(&self) -> &str { + &self.worker_id + } + + /// Get a reference to the underlying transaction store + pub fn inner(&self) -> &EoaExecutorStore { + self.store + } + + /// Get transaction data by transaction ID + pub async fn get_transaction_data( + &self, + transaction_id: &str, + ) -> Result, TransactionStoreError> { + self.store.get_transaction_data(transaction_id).await + } +} diff --git a/executors/src/eoa/worker.rs 
b/executors/src/eoa/worker.rs new file mode 100644 index 0000000..fba7601 --- /dev/null +++ b/executors/src/eoa/worker.rs @@ -0,0 +1,1936 @@ +use alloy::consensus::{ + SignableTransaction, Signed, Transaction, TxEip4844Variant, TxEip4844WithSidecar, + TypedTransaction, +}; +use alloy::network::{TransactionBuilder, TransactionBuilder7702}; +use alloy::primitives::utils::Unit; +use alloy::primitives::{Address, B256, Bytes, U256}; +use alloy::providers::Provider; +use alloy::rpc::types::TransactionRequest as AlloyTransactionRequest; +use alloy::signers::Signature; +use alloy::transports::{RpcError, TransportErrorKind}; +use engine_core::error::EngineError; +use engine_core::execution_options::eoa::EoaTransactionTypeData; +use engine_core::signer::AccountSigner; +use engine_core::{ + chain::{Chain, ChainService, RpcCredentials}, + credentials::SigningCredential, + error::{AlloyRpcErrorToEngineError, RpcErrorKind}, + signer::{EoaSigner, EoaSigningOptions}, +}; +use hex; +use serde::{Deserialize, Serialize}; +use std::{sync::Arc, time::Duration}; +use tokio::time::sleep; +use twmq::{ + DurableExecution, FailHookData, NackHookData, SuccessHookData, UserCancellable, + error::TwmqError, + hooks::TransactionContext, + job::{BorrowedJob, JobResult, RequeuePosition, ToJobResult}, +}; + +use crate::eoa::store::{ + BorrowedTransactionData, EoaExecutorStore, EoaHealth, EoaTransactionRequest, + ScopedEoaExecutorStore, TransactionData, TransactionStoreError, +}; + +// ========== SPEC-COMPLIANT CONSTANTS ========== +const MAX_INFLIGHT_PER_EOA: u64 = 100; // Default from spec +const MAX_RECYCLED_THRESHOLD: u64 = 50; // Circuit breaker from spec +const TARGET_TRANSACTIONS_PER_EOA: u64 = 10; // Fleet management from spec +const MIN_TRANSACTIONS_PER_EOA: u64 = 1; // Fleet management from spec +const HEALTH_CHECK_INTERVAL: u64 = 300; // 5 minutes in seconds +const NONCE_STALL_TIMEOUT: u64 = 300_000; // 5 minutes in milliseconds - after this time, attempt gas bump + +// ========== JOB 
DATA ========== +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaExecutorWorkerJobData { + pub eoa_address: Address, + pub chain_id: u64, + pub worker_id: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EoaExecutorWorkerResult { + pub recovered_transactions: u32, + pub confirmed_transactions: u32, + pub failed_transactions: u32, + pub sent_transactions: u32, +} + +#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "errorCode")] +pub enum EoaExecutorWorkerError { + #[error("Chain service error for chainId {chain_id}: {message}")] + ChainServiceError { chain_id: u64, message: String }, + + #[error("Store error: {message}")] + StoreError { + message: String, + inner_error: TransactionStoreError, + }, + + #[error("Transaction not found: {transaction_id}")] + TransactionNotFound { transaction_id: String }, + + #[error("Transaction simulation failed: {message}")] + TransactionSimulationFailed { + message: String, + inner_error: EngineError, + }, + + #[error("Transaction build failed: {message}")] + TransactionBuildFailed { message: String }, + + #[error("RPC error: {message}")] + RpcError { + message: String, + inner_error: EngineError, + }, + + #[error("Signature parsing failed: {message}")] + SignatureParsingFailed { message: String }, + + #[error("Transaction signing failed: {message}")] + SigningError { + message: String, + inner_error: EngineError, + }, + + #[error("Work still remaining: {message}")] + WorkRemaining { message: String }, + + #[error("Internal error: {message}")] + InternalError { message: String }, + + #[error("User cancelled")] + UserCancelled, +} + +impl From for EoaExecutorWorkerError { + fn from(error: TwmqError) -> Self { + EoaExecutorWorkerError::InternalError { + message: format!("Queue error: {}", error), + } + } +} + +impl From for EoaExecutorWorkerError { + fn 
from(error: TransactionStoreError) -> Self { + EoaExecutorWorkerError::StoreError { + message: error.to_string(), + inner_error: error, + } + } +} + +impl UserCancellable for EoaExecutorWorkerError { + fn user_cancelled() -> Self { + EoaExecutorWorkerError::UserCancelled + } +} + +// ========== SIMPLE ERROR CLASSIFICATION ========== +#[derive(Debug)] +enum SendErrorClassification { + PossiblySent, // "nonce too low", "already known" etc + DeterministicFailure, // Invalid signature, malformed tx, insufficient funds etc +} + +#[derive(PartialEq, Eq, Debug)] +enum SendContext { + Rebroadcast, + InitialBroadcast, +} + +#[tracing::instrument(skip_all, fields(error = %error, context = ?context))] +fn classify_send_error( + error: &RpcError, + context: SendContext, +) -> SendErrorClassification { + if !error.is_error_resp() { + return SendErrorClassification::DeterministicFailure; + } + + let error_str = error.to_string().to_lowercase(); + + // Deterministic failures that didn't consume nonce (spec-compliant) + if error_str.contains("invalid signature") + || error_str.contains("malformed transaction") + || (context == SendContext::InitialBroadcast && error_str.contains("insufficient funds")) + || error_str.contains("invalid transaction format") + || error_str.contains("nonce too high") + // Should trigger nonce reset + { + return SendErrorClassification::DeterministicFailure; + } + + // Transaction possibly made it to mempool (spec-compliant) + if error_str.contains("nonce too low") + || error_str.contains("already known") + || error_str.contains("replacement transaction underpriced") + { + return SendErrorClassification::PossiblySent; + } + + // Additional common failures that didn't consume nonce + if error_str.contains("malformed") + || error_str.contains("gas limit") + || error_str.contains("intrinsic gas too low") + { + return SendErrorClassification::DeterministicFailure; + } + + tracing::warn!( + "Unknown send error: {}. 
PLEASE REPORT FOR ADDING CORRECT CLASSIFICATION [NOTIFY]", + error_str + ); + + // Default: assume possibly sent for safety + SendErrorClassification::PossiblySent +} + +fn should_trigger_nonce_reset(error: &RpcError) -> bool { + let error_str = error.to_string().to_lowercase(); + + // "nonce too high" should trigger nonce reset as per spec + error_str.contains("nonce too high") +} + +fn should_update_balance_threshold(error: &EngineError) -> bool { + match error { + EngineError::RpcError { kind, .. } + | EngineError::PaymasterError { kind, .. } + | EngineError::BundlerError { kind, .. } => match kind { + RpcErrorKind::ErrorResp(resp) => { + let message = resp.message.to_lowercase(); + message.contains("insufficient funds") + || message.contains("insufficient balance") + || message.contains("out of gas") + || message.contains("insufficient eth") + || message.contains("balance too low") + || message.contains("not enough funds") + || message.contains("insufficient native token") + } + _ => false, + }, + _ => false, + } +} + +fn is_retryable_rpc_error(kind: &RpcErrorKind) -> bool { + match kind { + RpcErrorKind::TransportHttpError { status, .. } if *status >= 400 && *status < 500 => false, + RpcErrorKind::UnsupportedFeature { .. 
} => false, + _ => true, + } +} + +// ========== PREPARED TRANSACTION ========== +#[derive(Debug, Clone)] +struct PreparedTransaction { + transaction_id: String, + signed_tx: Signed, + nonce: u64, +} + +// ========== CONFIRMATION FLOW DATA STRUCTURES ========== +#[derive(Debug, Clone)] +struct PendingTransaction { + nonce: u64, + hash: String, + transaction_id: String, +} + +#[derive(Debug, Clone)] +struct ConfirmedTransaction { + nonce: u64, + hash: String, + transaction_id: String, + receipt: alloy::rpc::types::TransactionReceipt, +} + +#[derive(Debug, Clone)] +struct FailedTransaction { + hash: String, + transaction_id: String, +} + +// ========== STORE BATCH OPERATION TYPES ========== +#[derive(Debug, Clone)] +pub struct TransactionSuccess { + pub hash: String, + pub transaction_id: String, + pub receipt_data: String, +} + +#[derive(Debug, Clone)] +pub struct TransactionFailure { + pub hash: String, + pub transaction_id: String, +} + +// ========== MAIN WORKER ========== +/// EOA Executor Worker +/// +/// ## Core Workflow: +/// 1. **Acquire Lock Aggressively** - Takes over stalled workers using force acquisition. This is a lock over EOA:CHAIN +/// 2. **Crash Recovery** - Rebroadcasts borrowed transactions, handles deterministic failures +/// 3. **Confirmation Flow** - Fetches receipts, confirms transactions, handles nonce sync, requeues replaced transactions +/// 4. **Send Flow** - Processes recycled nonces first, then new transactions with in-flight budget control +/// 5. **Lock Release** - Explicit release in finally pattern as per spec +/// +/// ## Key Features: +/// - **Atomic Operations**: All state transitions use Redis WATCH/MULTI/EXEC for durability +/// - **Borrowed State**: Mid-send crash recovery with atomic pending->borrowed->submitted transitions +/// - **Nonce Management**: Optimistic nonce tracking with recycled nonce priority +/// - **Error Classification**: Spec-compliant deterministic vs. 
possibly-sent error handling +/// - **Circuit Breakers**: Automatic recycled nonce nuking when threshold exceeded +/// - **Health Monitoring**: Balance checking with configurable thresholds +pub struct EoaExecutorWorker +where + CS: ChainService + Send + Sync + 'static, +{ + pub chain_service: Arc, + pub store: Arc, + pub eoa_signer: Arc, + pub max_inflight: u64, // Note: Spec uses MAX_INFLIGHT_PER_EOA constant + pub max_recycled_nonces: u64, // Note: Spec uses MAX_RECYCLED_THRESHOLD constant +} + +impl DurableExecution for EoaExecutorWorker +where + CS: ChainService + Send + Sync + 'static, +{ + type Output = EoaExecutorWorkerResult; + type ErrorData = EoaExecutorWorkerError; + type JobData = EoaExecutorWorkerJobData; + + #[tracing::instrument(skip_all, fields(eoa = %job.job.data.eoa_address, chain_id = job.job.data.chain_id))] + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + let data = &job.job.data; + + // 1. GET CHAIN + let chain = self + .chain_service + .get_chain(data.chain_id) + .map_err(|e| EoaExecutorWorkerError::ChainServiceError { + chain_id: data.chain_id, + message: format!("Failed to get chain: {}", e), + }) + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 2. 
CREATE SCOPED STORE (acquires lock) + let scoped = ScopedEoaExecutorStore::build( + &self.store, + data.eoa_address, + data.chain_id, + data.worker_id.clone(), + ) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // initiate health data if doesn't exist + self.get_eoa_health(&scoped, &chain) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // Execute main workflow with proper error handling + self.execute_main_workflow(&scoped, &chain).await + } + + async fn on_success( + &self, + job: &BorrowedJob, + _success_data: SuccessHookData<'_, Self::Output>, + _tx: &mut TransactionContext<'_>, + ) { + // Release EOA lock on success + self.release_eoa_lock( + job.job.data.eoa_address, + job.job.data.chain_id, + &job.job.data.worker_id, + ) + .await; + } + + async fn on_nack( + &self, + job: &BorrowedJob, + _nack_data: NackHookData<'_, Self::ErrorData>, + _tx: &mut TransactionContext<'_>, + ) { + // Release EOA lock on nack + self.release_eoa_lock( + job.job.data.eoa_address, + job.job.data.chain_id, + &job.job.data.worker_id, + ) + .await; + } + + async fn on_fail( + &self, + job: &BorrowedJob, + _fail_data: FailHookData<'_, Self::ErrorData>, + _tx: &mut TransactionContext<'_>, + ) { + // Release EOA lock on fail + self.release_eoa_lock( + job.job.data.eoa_address, + job.job.data.chain_id, + &job.job.data.worker_id, + ) + .await; + } +} + +impl EoaExecutorWorker +where + CS: ChainService + Send + Sync + 'static, +{ + /// Execute the main EOA worker workflow + async fn execute_main_workflow( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> JobResult { + // 1. CRASH RECOVERY + let recovered = self + .recover_borrowed_state(scoped, chain) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 2. 
CONFIRM FLOW + let (confirmed, failed) = self + .confirm_flow(scoped, chain) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 3. SEND FLOW + let sent = self + .send_flow(scoped, chain) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)?; + + // 4. CHECK FOR REMAINING WORK + let pending_count = scoped + .peek_pending_transactions(1000) + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)? + .len(); + let borrowed_count = scoped + .peek_borrowed_transactions() + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)? + .len(); + let recycled_count = scoped + .peek_recycled_nonces() + .await + .map_err_nack(Some(Duration::from_secs(10)), RequeuePosition::Last)? + .len(); + + // NACK here is a yield, when you think of the queue as a distributed EOA scheduler + if pending_count > 0 || borrowed_count > 0 || recycled_count > 0 { + return Err(EoaExecutorWorkerError::WorkRemaining { + message: format!( + "Work remaining: {} pending, {} borrowed, {} recycled", + pending_count, borrowed_count, recycled_count + ), + }) + .map_err_nack(Some(Duration::from_secs(2)), RequeuePosition::Last); + } + + // Only succeed if no work remains + Ok(EoaExecutorWorkerResult { + recovered_transactions: recovered, + confirmed_transactions: confirmed, + failed_transactions: failed, + sent_transactions: sent, + }) + } + + /// Release EOA lock following the spec's finally pattern + async fn release_eoa_lock(&self, eoa: Address, chain_id: u64, worker_id: &str) { + if let Err(e) = self.store.release_eoa_lock(eoa, chain_id, worker_id).await { + tracing::error!( + eoa = %eoa, + chain_id = %chain_id, + worker_id = %worker_id, + error = %e, + "Failed to release EOA lock" + ); + } + } + + // ========== CRASH RECOVERY ========== + #[tracing::instrument(skip_all)] + async fn recover_borrowed_state( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result { + let mut 
borrowed_transactions = scoped.peek_borrowed_transactions().await?; + + if borrowed_transactions.is_empty() { + return Ok(0); + } + + tracing::warn!( + "Recovering {} borrowed transactions. This indicates a worker crash or system issue", + borrowed_transactions.len() + ); + + // Sort borrowed transactions by nonce to ensure proper ordering + borrowed_transactions.sort_by_key(|tx| tx.signed_transaction.nonce()); + + // Rebroadcast all transactions in parallel + let rebroadcast_futures: Vec<_> = borrowed_transactions + .iter() + .map(|borrowed| { + let tx_envelope = borrowed.signed_transaction.clone().into(); + let nonce = borrowed.signed_transaction.nonce(); + let transaction_id = borrowed.transaction_id.clone(); + + tracing::info!( + transaction_id = %transaction_id, + nonce = nonce, + "Recovering borrowed transaction" + ); + + async move { + let send_result = chain.provider().send_tx_envelope(tx_envelope).await; + (borrowed, send_result) + } + }) + .collect(); + + let rebroadcast_results = futures::future::join_all(rebroadcast_futures).await; + + // Process results sequentially for Redis state changes + let mut recovered_count = 0; + for (borrowed, send_result) in rebroadcast_results { + let nonce = borrowed.signed_transaction.nonce(); + + match send_result { + Ok(_) => { + // Transaction was sent successfully + scoped + .move_borrowed_to_submitted( + nonce, + &format!("{:?}", borrowed.hash), + &borrowed.transaction_id, + ) + .await?; + tracing::info!(transaction_id = %borrowed.transaction_id, nonce = nonce, "Moved recovered transaction to submitted"); + } + Err(e) => { + match classify_send_error(&e, SendContext::Rebroadcast) { + SendErrorClassification::PossiblySent => { + // Transaction possibly sent, move to submitted + scoped + .move_borrowed_to_submitted( + nonce, + &format!("{:?}", borrowed.hash), + &borrowed.transaction_id, + ) + .await?; + tracing::info!(transaction_id = %borrowed.transaction_id, nonce = nonce, "Moved recovered transaction to submitted 
(possibly sent)"); + } + SendErrorClassification::DeterministicFailure => { + // Transaction is broken, recycle nonce and requeue + scoped + .move_borrowed_to_recycled(nonce, &borrowed.transaction_id) + .await?; + tracing::warn!(transaction_id = %borrowed.transaction_id, nonce = nonce, error = %e, "Recycled failed transaction"); + + if should_update_balance_threshold(&e.to_engine_error(chain)) { + self.update_balance_threshold(scoped, chain).await?; + } + + // Check if this should trigger nonce reset + if should_trigger_nonce_reset(&e) { + tracing::warn!( + eoa = %scoped.eoa(), + chain_id = %scoped.chain_id(), + "Nonce too high error detected, may need nonce synchronization" + ); + // The next confirm_flow will fetch fresh nonce and auto-sync + } + } + } + } + } + + recovered_count += 1; + } + + Ok(recovered_count) + } + + // ========== CONFIRM FLOW ========== + #[tracing::instrument(skip_all)] + async fn confirm_flow( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result<(u32, u32), EoaExecutorWorkerError> { + // Get fresh on-chain transaction count + let current_chain_nonce = chain + .provider() + .get_transaction_count(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = e.to_engine_error(chain); + EoaExecutorWorkerError::RpcError { + message: format!("Failed to get transaction count: {}", engine_error), + inner_error: engine_error, + } + })?; + + let cached_nonce = match scoped.get_cached_transaction_count().await { + Err(e) => match e { + TransactionStoreError::NonceSyncRequired { .. 
} => { + scoped.reset_nonces(current_chain_nonce).await?; + current_chain_nonce + } + _ => return Err(e.into()), + }, + Ok(cached_nonce) => cached_nonce, + }; + + // no nonce progress + if current_chain_nonce == cached_nonce { + let current_health = self.get_eoa_health(scoped, chain).await?; + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + // No nonce progress - check if we should attempt gas bumping for stalled nonce + let time_since_movement = now.saturating_sub(current_health.last_nonce_movement_at); + + if time_since_movement > NONCE_STALL_TIMEOUT { + tracing::info!( + time_since_movement = time_since_movement, + stall_timeout = NONCE_STALL_TIMEOUT, + current_chain_nonce = current_chain_nonce, + "Nonce has been stalled, attempting gas bump" + ); + + // Attempt gas bump for the next expected nonce + if let Err(e) = self + .attempt_gas_bump_for_stalled_nonce(scoped, chain, current_chain_nonce) + .await + { + tracing::warn!( + error = %e, + "Failed to attempt gas bump for stalled nonce" + ); + } + } + + tracing::debug!("No nonce progress, skipping confirm flow"); + return Ok((0, 0)); + } + + tracing::info!( + current_chain_nonce = current_chain_nonce, + cached_nonce = cached_nonce, + "Processing confirmations" + ); + + // Get all pending transactions below the current chain nonce + let pending_txs = self + .get_pending_transactions_below_nonce(scoped, current_chain_nonce) + .await?; + + if pending_txs.is_empty() { + tracing::debug!("No pending transactions to confirm"); + return Ok((0, 0)); + } + + // Fetch receipts and categorize transactions + let (confirmed_txs, failed_txs) = self + .fetch_and_categorize_transactions(chain, pending_txs) + .await; + + // Process confirmed transactions + let confirmed_count = if !confirmed_txs.is_empty() { + let successes: Vec = confirmed_txs + .into_iter() + .map(|tx| { + let receipt_data = match serde_json::to_string(&tx.receipt) { + Ok(receipt_json) => receipt_json, + Err(e) => { + tracing::warn!( + 
transaction_id = %tx.transaction_id, + hash = %tx.hash, + error = %e, + "Failed to serialize receipt as JSON, using debug format" + ); + format!("{:?}", tx.receipt) + } + }; + + tracing::info!( + transaction_id = %tx.transaction_id, + nonce = tx.nonce, + hash = %tx.hash, + "Transaction confirmed" + ); + + TransactionSuccess { + hash: tx.hash, + transaction_id: tx.transaction_id, + receipt_data, + } + }) + .collect(); + + let count = successes.len() as u32; + scoped.batch_succeed_transactions(successes).await?; + count + } else { + 0 + }; + + // Process failed transactions + let failed_count = if !failed_txs.is_empty() { + let failures: Vec = failed_txs + .into_iter() + .map(|tx| { + tracing::warn!( + transaction_id = %tx.transaction_id, + hash = %tx.hash, + "Transaction failed, requeued" + ); + TransactionFailure { + hash: tx.hash, + transaction_id: tx.transaction_id, + } + }) + .collect(); + + let count = failures.len() as u32; + scoped.batch_fail_and_requeue_transactions(failures).await?; + count + } else { + 0 + }; + + // Update cached transaction count + scoped + .update_cached_transaction_count(current_chain_nonce) + .await?; + + // Synchronize nonces to ensure consistency + if let Err(e) = self + .store + .synchronize_nonces_with_chain( + scoped.eoa(), + scoped.chain_id(), + scoped.worker_id(), + current_chain_nonce, + ) + .await + { + tracing::warn!(error = %e, "Failed to synchronize nonces with chain"); + } + + Ok((confirmed_count, failed_count)) + } + + // ========== SEND FLOW ========== + #[tracing::instrument(skip_all)] + async fn send_flow( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result { + // 1. 
Get EOA health (initializes if needed) and check if we should update balance + let mut health = self.get_eoa_health(scoped, chain).await?; + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + // Update balance if it's stale + if now - health.balance_fetched_at > HEALTH_CHECK_INTERVAL { + let balance = chain + .provider() + .get_balance(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = e.to_engine_error(chain); + EoaExecutorWorkerError::RpcError { + message: format!("Failed to get balance: {}", engine_error), + inner_error: engine_error, + } + })?; + + health.balance = balance; + health.balance_fetched_at = now; + scoped.update_health_data(&health).await?; + } + + if health.balance <= health.balance_threshold { + tracing::warn!( + "EOA has insufficient balance (<= {} wei), skipping send flow", + health.balance_threshold + ); + return Ok(0); + } + + let mut total_sent = 0; + + // 2. Process recycled nonces first + total_sent += self.process_recycled_nonces(scoped, chain).await?; + + // 3. Only proceed to new nonces if we successfully used all recycled nonces + let remaining_recycled = scoped.peek_recycled_nonces().await?.len(); + if remaining_recycled == 0 { + let inflight_budget = scoped.get_inflight_budget(self.max_inflight).await?; + if inflight_budget > 0 { + total_sent += self + .process_new_transactions(scoped, chain, inflight_budget) + .await?; + } + } else { + tracing::warn!( + "Still have {} recycled nonces, not sending new transactions", + remaining_recycled + ); + } + + Ok(total_sent) + } + + async fn process_recycled_nonces( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result { + let recycled_nonces = scoped.peek_recycled_nonces().await?; + + if recycled_nonces.is_empty() { + return Ok(0); + } + + // Get pending transactions (one per recycled nonce) + let pending_txs = scoped + .peek_pending_transactions(recycled_nonces.len() as u64) + .await?; + + // 1. 
SEQUENTIAL REDIS: Collect nonce-transaction pairs + let mut nonce_tx_pairs = Vec::new(); + for (i, nonce) in recycled_nonces.into_iter().enumerate() { + if let Some(tx_id) = pending_txs.get(i) { + // Get transaction data + if let Some(tx_data) = scoped.get_transaction_data(tx_id).await? { + nonce_tx_pairs.push((nonce, tx_id.clone(), tx_data)); + } else { + tracing::warn!("Transaction data not found for {}", tx_id); + continue; + } + } else { + // No pending transactions - skip recycled nonces without pending transactions + tracing::debug!("No pending transaction for recycled nonce {}", nonce); + continue; + } + } + + if nonce_tx_pairs.is_empty() { + return Ok(0); + } + + // 2. PARALLEL BUILD/SIGN: Build and sign all transactions in parallel + let build_futures: Vec<_> = nonce_tx_pairs + .iter() + .map(|(nonce, transaction_id, tx_data)| async move { + let prepared = self + .build_and_sign_transaction(tx_data, *nonce, chain) + .await; + (*nonce, transaction_id, prepared) + }) + .collect(); + + let build_results = futures::future::join_all(build_futures).await; + + // 3. 
SEQUENTIAL REDIS: Move successfully built transactions to borrowed state + let mut prepared_txs = Vec::new(); + let mut balance_threshold_update_needed = false; + + for (nonce, transaction_id, build_result) in build_results { + match build_result { + Ok(signed_tx) => { + let borrowed_data = BorrowedTransactionData { + transaction_id: transaction_id.clone(), + signed_transaction: signed_tx.clone(), + hash: signed_tx.hash().to_string(), + borrowed_at: chrono::Utc::now().timestamp_millis().max(0) as u64, + }; + + // Try to atomically move from pending to borrowed with recycled nonce + match scoped + .atomic_move_pending_to_borrowed_with_recycled_nonce( + transaction_id, + nonce, + &borrowed_data, + ) + .await + { + Ok(()) => { + let prepared = PreparedTransaction { + transaction_id: transaction_id.clone(), + signed_tx, + nonce, + }; + prepared_txs.push(prepared); + } + Err(TransactionStoreError::NonceNotInRecycledSet { .. }) => { + tracing::debug!("Nonce {} was consumed by another worker", nonce); + continue; + } + Err(TransactionStoreError::TransactionNotInPendingQueue { .. }) => { + tracing::debug!("Transaction {} already processed", transaction_id); + continue; + } + Err(e) => { + tracing::error!("Failed to move {} to borrowed: {}", transaction_id, e); + continue; + } + } + } + Err(e) => { + // Accumulate balance threshold issues instead of updating immediately + if let EoaExecutorWorkerError::TransactionSimulationFailed { + inner_error, .. + } = &e + { + if should_update_balance_threshold(inner_error) { + balance_threshold_update_needed = true; + } + } else if let EoaExecutorWorkerError::RpcError { inner_error, .. 
} = &e { + if should_update_balance_threshold(inner_error) { + balance_threshold_update_needed = true; + } + } + + tracing::warn!("Failed to build transaction {}: {}", transaction_id, e); + continue; + } + } + } + + // Update balance threshold once if any build failures were due to balance issues + if balance_threshold_update_needed { + if let Err(e) = self.update_balance_threshold(scoped, chain).await { + tracing::error!( + "Failed to update balance threshold after parallel build failures: {}", + e + ); + } + } + + if prepared_txs.is_empty() { + return Ok(0); + } + + // 4. PARALLEL SEND: Send all transactions in parallel + let send_futures: Vec<_> = prepared_txs + .iter() + .map(|prepared| async move { + let result = chain + .provider() + .send_tx_envelope(prepared.signed_tx.clone().into()) + .await; + (prepared, result) + }) + .collect(); + + let send_results = futures::future::join_all(send_futures).await; + + // 5. SEQUENTIAL REDIS: Process results and update states + let mut sent_count = 0; + for (prepared, send_result) in send_results { + match send_result { + Ok(_) => { + // Transaction sent successfully + match scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await + { + Ok(()) => { + sent_count += 1; + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + hash = ?prepared.signed_tx.hash(), + "Successfully sent recycled transaction" + ); + } + Err(e) => { + tracing::error!( + "Failed to move {} to submitted: {}", + prepared.transaction_id, + e + ); + } + } + } + Err(e) => { + match classify_send_error(&e, SendContext::InitialBroadcast) { + SendErrorClassification::PossiblySent => { + // Move to submitted state + match scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await + { + Ok(()) => { + sent_count += 1; + tracing::info!( + transaction_id = 
%prepared.transaction_id, + nonce = prepared.nonce, + "Recycled transaction possibly sent" + ); + } + Err(e) => { + tracing::error!( + "Failed to move {} to submitted: {}", + prepared.transaction_id, + e + ); + } + } + } + SendErrorClassification::DeterministicFailure => { + // Recycle nonce and requeue transaction + match scoped + .move_borrowed_to_recycled(prepared.nonce, &prepared.transaction_id) + .await + { + Ok(()) => { + tracing::warn!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + error = %e, + "Recycled transaction failed, re-recycled nonce" + ); + + if should_update_balance_threshold(&e.to_engine_error(chain)) { + if let Err(e) = + self.update_balance_threshold(scoped, chain).await + { + tracing::error!( + "Failed to update balance threshold: {}", + e + ); + } + } + + if should_trigger_nonce_reset(&e) { + tracing::warn!( + nonce = prepared.nonce, + "Nonce too high error detected, may need nonce synchronization" + ); + } + } + Err(e) => { + tracing::error!( + "Failed to move {} back to recycled: {}", + prepared.transaction_id, + e + ); + } + } + } + } + } + } + } + + Ok(sent_count) + } + + async fn process_new_transactions( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + budget: u64, + ) -> Result { + if budget == 0 { + return Ok(0); + } + + // 1. SEQUENTIAL REDIS: Get pending transactions + let pending_txs = scoped.peek_pending_transactions(budget).await?; + if pending_txs.is_empty() { + return Ok(0); + } + + let optimistic_nonce = scoped.get_optimistic_nonce().await?; + + // 2. PARALLEL BUILD/SIGN: Build and sign all transactions in parallel + let build_tasks: Vec<_> = pending_txs + .iter() + .enumerate() + .map(|(i, tx_id)| { + let expected_nonce = optimistic_nonce + i as u64; + self.build_and_sign_single_transaction(scoped, tx_id, expected_nonce, chain) + }) + .collect(); + + let prepared_results = futures::future::join_all(build_tasks).await; + + // 3. 
SEQUENTIAL REDIS: Move successful transactions to borrowed state (maintain nonce order) + let mut prepared_txs = Vec::new(); + let mut balance_threshold_update_needed = false; + + for (i, result) in prepared_results.into_iter().enumerate() { + match result { + Ok(prepared) => { + let borrowed_data = BorrowedTransactionData { + transaction_id: prepared.transaction_id.clone(), + signed_transaction: prepared.signed_tx.clone(), + hash: prepared.signed_tx.hash().to_string(), + borrowed_at: chrono::Utc::now().timestamp_millis().max(0) as u64, + }; + + match scoped + .atomic_move_pending_to_borrowed_with_new_nonce( + &prepared.transaction_id, + prepared.nonce, + &borrowed_data, + ) + .await + { + Ok(()) => prepared_txs.push(prepared), + Err(TransactionStoreError::OptimisticNonceChanged { .. }) => { + tracing::debug!( + "Nonce changed for transaction {}, skipping", + prepared.transaction_id + ); + break; // Stop processing if nonce changed + } + Err(TransactionStoreError::TransactionNotInPendingQueue { .. }) => { + tracing::debug!( + "Transaction {} already processed, skipping", + prepared.transaction_id + ); + continue; + } + Err(e) => { + tracing::error!( + "Failed to move transaction {} to borrowed: {}", + prepared.transaction_id, + e + ); + continue; + } + } + } + Err(e) => { + // Accumulate balance threshold issues instead of updating immediately + if let EoaExecutorWorkerError::TransactionSimulationFailed { + inner_error, .. + } = &e + { + if should_update_balance_threshold(inner_error) { + balance_threshold_update_needed = true; + } + } else if let EoaExecutorWorkerError::RpcError { inner_error, .. 
} = &e { + if should_update_balance_threshold(inner_error) { + balance_threshold_update_needed = true; + } + } + + tracing::warn!("Failed to build transaction {}: {}", pending_txs[i], e); + // Individual transaction failure doesn't stop the worker + continue; + } + } + } + + // Update balance threshold once if any build failures were due to balance issues + if balance_threshold_update_needed { + if let Err(e) = self.update_balance_threshold(scoped, chain).await { + tracing::error!( + "Failed to update balance threshold after parallel build failures: {}", + e + ); + } + } + + if prepared_txs.is_empty() { + return Ok(0); + } + + // 4. PARALLEL SEND (but ordered): Send all transactions in parallel but in nonce order + let send_futures: Vec<_> = prepared_txs + .iter() + .enumerate() + .map(|(i, prepared)| async move { + // Add delay for ordering (except first transaction) + if i > 0 { + sleep(Duration::from_millis(50)).await; // 50ms delay between consecutive nonces + } + + let result = chain + .provider() + .send_tx_envelope(prepared.signed_tx.clone().into()) + .await; + (prepared, result) + }) + .collect(); + + let send_results = futures::future::join_all(send_futures).await; + + // 5. 
SEQUENTIAL REDIS: Process results and update states + let mut sent_count = 0; + for (prepared, send_result) in send_results { + match send_result { + Ok(_) => { + // Transaction sent successfully + match scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await + { + Ok(()) => { + sent_count += 1; + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + hash = ?prepared.signed_tx.hash(), + "Successfully sent new transaction" + ); + } + Err(e) => { + tracing::error!( + "Failed to move {} to submitted: {}", + prepared.transaction_id, + e + ); + } + } + } + Err(e) => { + match classify_send_error(&e, SendContext::InitialBroadcast) { + SendErrorClassification::PossiblySent => { + // Move to submitted state + match scoped + .move_borrowed_to_submitted( + prepared.nonce, + &format!("{:?}", prepared.signed_tx.hash()), + &prepared.transaction_id, + ) + .await + { + Ok(()) => { + sent_count += 1; + tracing::info!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + "New transaction possibly sent" + ); + } + Err(e) => { + tracing::error!( + "Failed to move {} to submitted: {}", + prepared.transaction_id, + e + ); + } + } + } + SendErrorClassification::DeterministicFailure => { + // Recycle nonce and requeue transaction + match scoped + .move_borrowed_to_recycled(prepared.nonce, &prepared.transaction_id) + .await + { + Ok(()) => { + tracing::warn!( + transaction_id = %prepared.transaction_id, + nonce = prepared.nonce, + error = %e, + "New transaction failed, recycled nonce" + ); + + if should_update_balance_threshold(&e.to_engine_error(chain)) { + if let Err(e) = + self.update_balance_threshold(scoped, chain).await + { + tracing::error!( + "Failed to update balance threshold: {}", + e + ); + } + } + + if should_trigger_nonce_reset(&e) { + tracing::warn!( + nonce = prepared.nonce, + "Nonce too high error detected, may need nonce 
synchronization" + ); + } + } + Err(e) => { + tracing::error!( + "Failed to move {} to recycled: {}", + prepared.transaction_id, + e + ); + } + } + } + } + } + } + } + + Ok(sent_count) + } + + // ========== TRANSACTION BUILDING & SENDING ========== + async fn build_and_sign_single_transaction( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + transaction_id: &str, + nonce: u64, + chain: &impl Chain, + ) -> Result { + // Get transaction data + let tx_data = scoped + .get_transaction_data(transaction_id) + .await? + .ok_or_else(|| EoaExecutorWorkerError::TransactionNotFound { + transaction_id: transaction_id.to_string(), + })?; + + // Build and sign transaction + let signed_tx = self + .build_and_sign_transaction(&tx_data, nonce, chain) + .await?; + + Ok(PreparedTransaction { + transaction_id: transaction_id.to_string(), + signed_tx, + nonce, + }) + } + + async fn send_noop_transaction( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + nonce: u64, + ) -> Result { + // Create a minimal transaction to consume the recycled nonce + // Send 0 ETH to self with minimal gas + let eoa = scoped.eoa(); + + // Build no-op transaction (send 0 to self) + let mut tx_request = AlloyTransactionRequest::default() + .with_from(eoa) + .with_to(eoa) // Send to self + .with_value(U256::ZERO) // Send 0 value + .with_input(Bytes::new()) // No data + .with_chain_id(scoped.chain_id()) + .with_nonce(nonce) + .with_gas_limit(21000); // Minimal gas for basic transfer + + // Estimate gas to ensure the transaction is valid + match chain.provider().estimate_gas(tx_request.clone()).await { + Ok(gas_limit) => { + tx_request = tx_request.with_gas_limit(gas_limit); + } + Err(e) => { + tracing::warn!( + nonce = nonce, + error = %e, + "Failed to estimate gas for no-op transaction" + ); + return Ok(false); + } + } + + // Build typed transaction + let typed_tx = match tx_request.build_typed_tx() { + Ok(tx) => tx, + Err(e) => { + tracing::warn!( + nonce = nonce, + error = ?e, + "Failed 
to build typed transaction for no-op" + ); + return Ok(false); + } + }; + + // Get signing credential from health or use default approach + // For no-op transactions, we need to find a valid signing credential + // This is a limitation of the current design - no-op transactions + // need access to signing credentials which are transaction-specific + tracing::warn!( + nonce = nonce, + "No-op transaction requires signing credential access - recycled nonce will remain unconsumed" + ); + Ok(false) + } + + // ========== GAS BUMP METHODS ========== + + /// Attempt to gas bump a stalled transaction for the next expected nonce + async fn attempt_gas_bump_for_stalled_nonce( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + expected_nonce: u64, + ) -> Result { + tracing::info!( + nonce = expected_nonce, + "Attempting gas bump for stalled nonce" + ); + + // Get all transaction IDs for this nonce + let transaction_ids = scoped.get_transaction_ids_for_nonce(expected_nonce).await?; + + if transaction_ids.is_empty() { + tracing::debug!( + nonce = expected_nonce, + "No transactions found for stalled nonce" + ); + return Ok(false); + } + + // Load transaction data for all IDs and find the newest one + let mut newest_transaction: Option<(String, TransactionData)> = None; + let mut newest_submitted_at = 0u64; + + for transaction_id in transaction_ids { + if let Some(tx_data) = scoped.get_transaction_data(&transaction_id).await? 
{ + // Find the most recent attempt for this transaction + if let Some(latest_attempt) = tx_data.attempts.last() { + let submitted_at = latest_attempt.sent_at; + if submitted_at > newest_submitted_at { + newest_submitted_at = submitted_at; + newest_transaction = Some((transaction_id, tx_data)); + } + } + } + } + + if let Some((transaction_id, tx_data)) = newest_transaction { + tracing::info!( + transaction_id = %transaction_id, + nonce = expected_nonce, + "Found newest transaction for gas bump" + ); + + // Get the latest attempt to extract gas values from + // Build typed transaction -> manually bump -> sign + let typed_tx = match self + .build_typed_transaction(&tx_data, expected_nonce, chain) + .await + { + Ok(tx) => tx, + Err(e) => { + // Check if this is a balance threshold issue during simulation + if let EoaExecutorWorkerError::TransactionSimulationFailed { + inner_error, .. + } = &e + { + if should_update_balance_threshold(inner_error) { + if let Err(e) = self.update_balance_threshold(scoped, chain).await { + tracing::error!("Failed to update balance threshold: {}", e); + } + } + } else if let EoaExecutorWorkerError::RpcError { inner_error, .. 
} = &e { + if should_update_balance_threshold(inner_error) { + if let Err(e) = self.update_balance_threshold(scoped, chain).await { + tracing::error!("Failed to update balance threshold: {}", e); + } + } + } + + tracing::warn!( + transaction_id = %transaction_id, + nonce = expected_nonce, + error = %e, + "Failed to build typed transaction for gas bump" + ); + return Ok(false); + } + }; + let bumped_typed_tx = self.apply_gas_bump_to_typed_transaction(typed_tx, 120); // 20% increase + let bumped_tx = match self.sign_transaction(bumped_typed_tx, &tx_data).await { + Ok(tx) => tx, + Err(e) => { + tracing::warn!( + transaction_id = %transaction_id, + nonce = expected_nonce, + error = %e, + "Failed to sign transaction for gas bump" + ); + return Ok(false); + } + }; + + // Record the gas bump attempt + scoped + .add_gas_bump_attempt(&transaction_id, bumped_tx.clone()) + .await?; + + // Send the bumped transaction + let tx_envelope = bumped_tx.into(); + match chain.provider().send_tx_envelope(tx_envelope).await { + Ok(_) => { + tracing::info!( + transaction_id = %transaction_id, + nonce = expected_nonce, + "Successfully sent gas bumped transaction" + ); + return Ok(true); + } + Err(e) => { + tracing::warn!( + transaction_id = %transaction_id, + nonce = expected_nonce, + error = %e, + "Failed to send gas bumped transaction" + ); + // Don't fail the worker, just log the error + return Ok(false); + } + } + } + + Ok(false) + } + + // ========== HEALTH ACCESSOR ========== + + /// Get EOA health, initializing it if it doesn't exist + /// This method ensures the health data is always available for the worker + async fn get_eoa_health( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result { + let store_health = scoped.check_eoa_health().await?; + let now = chrono::Utc::now().timestamp_millis().max(0) as u64; + + match store_health { + Some(health) => Ok(health), + None => { + // Initialize with fresh data from chain + let balance = chain + .provider() 
+ .get_balance(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = e.to_engine_error(chain); + EoaExecutorWorkerError::RpcError { + message: format!( + "Failed to get balance during initialization: {}", + engine_error + ), + inner_error: engine_error, + } + })?; + + let health = EoaHealth { + balance, + balance_threshold: U256::ZERO, + balance_fetched_at: now, + last_confirmation_at: now, + last_nonce_movement_at: now, + nonce_resets: Vec::new(), + }; + + // Save to store + scoped.update_health_data(&health).await?; + Ok(health) + } + } + } + + #[tracing::instrument(skip_all, fields(eoa = %scoped.eoa(), chain_id = %chain.chain_id()))] + async fn update_balance_threshold( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + chain: &impl Chain, + ) -> Result<(), EoaExecutorWorkerError> { + let mut health = self.get_eoa_health(scoped, chain).await?; + + tracing::info!("Updating balance threshold"); + let balance_threshold = chain + .provider() + .get_balance(scoped.eoa()) + .await + .map_err(|e| { + let engine_error = e.to_engine_error(chain); + EoaExecutorWorkerError::RpcError { + message: format!("Failed to get balance: {}", engine_error), + inner_error: engine_error, + } + })?; + + health.balance_threshold = balance_threshold; + scoped.update_health_data(&health).await?; + Ok(()) + } + + // ========== CONFIRMATION FLOW HELPERS ========== + + /// Get pending transactions below the given nonce + async fn get_pending_transactions_below_nonce( + &self, + scoped: &ScopedEoaExecutorStore<'_>, + nonce: u64, + ) -> Result, EoaExecutorWorkerError> { + let pending_hashes = scoped.get_hashes_below_nonce(nonce).await?; + + let pending_txs = pending_hashes + .into_iter() + .map(|(nonce, hash, transaction_id)| PendingTransaction { + nonce, + hash, + transaction_id, + }) + .collect(); + + Ok(pending_txs) + } + + /// Fetch receipts for all pending transactions and categorize them + async fn fetch_and_categorize_transactions( + &self, + chain: &impl Chain, + pending_txs: Vec, 
+ ) -> (Vec, Vec) { + // Fetch all receipts in parallel + let receipt_futures: Vec<_> = pending_txs + .iter() + .filter_map(|tx| match tx.hash.parse::() { + Ok(hash_bytes) => Some(async move { + let receipt = chain.provider().get_transaction_receipt(hash_bytes).await; + (tx, receipt) + }), + Err(_) => { + tracing::warn!("Invalid hash format: {}, skipping", tx.hash); + None + } + }) + .collect(); + + let receipt_results = futures::future::join_all(receipt_futures).await; + + // Categorize transactions + let mut confirmed_txs = Vec::new(); + let mut failed_txs = Vec::new(); + + for (tx, receipt_result) in receipt_results { + match receipt_result { + Ok(Some(receipt)) => { + confirmed_txs.push(ConfirmedTransaction { + nonce: tx.nonce, + hash: tx.hash.clone(), + transaction_id: tx.transaction_id.clone(), + receipt, + }); + } + Ok(None) | Err(_) => { + failed_txs.push(FailedTransaction { + hash: tx.hash.clone(), + transaction_id: tx.transaction_id.clone(), + }); + } + } + } + + (confirmed_txs, failed_txs) + } + + // ========== HELPER METHODS ========== + async fn estimate_gas_fees( + &self, + chain: &impl Chain, + tx: AlloyTransactionRequest, + ) -> Result { + // Check what fees are missing and need to be estimated + + // If we have gas_price set, we're doing legacy - don't estimate EIP-1559 + if tx.gas_price.is_some() { + return Ok(tx); + } + + // If we have both EIP-1559 fees set, don't estimate + if tx.max_fee_per_gas.is_some() && tx.max_priority_fee_per_gas.is_some() { + return Ok(tx); + } + + // Try EIP-1559 fees first, fall back to legacy if unsupported + match chain.provider().estimate_eip1559_fees().await { + Ok(eip1559_fees) => { + tracing::debug!( + "Using EIP-1559 fees: max_fee={}, max_priority_fee={}", + eip1559_fees.max_fee_per_gas, + eip1559_fees.max_priority_fee_per_gas + ); + + let mut result = tx; + // Only set fees that are missing + if result.max_fee_per_gas.is_none() { + result = result.with_max_fee_per_gas(eip1559_fees.max_fee_per_gas); + } + if 
result.max_priority_fee_per_gas.is_none() { + result = + result.with_max_priority_fee_per_gas(eip1559_fees.max_priority_fee_per_gas); + } + + Ok(result) + } + Err(eip1559_error) => { + // Check if this is an "unsupported feature" error + if let RpcError::UnsupportedFeature(_) = &eip1559_error { + tracing::debug!("EIP-1559 not supported, falling back to legacy gas price"); + + // Fall back to legacy gas price only if no gas price is set + if tx.authorization_list().is_none() { + match chain.provider().get_gas_price().await { + Ok(gas_price) => { + tracing::debug!("Using legacy gas price: {}", gas_price); + Ok(tx.with_gas_price(gas_price)) + } + Err(legacy_error) => Err(EoaExecutorWorkerError::RpcError { + message: format!( + "Failed to get legacy gas price: {}", + legacy_error + ), + inner_error: legacy_error.to_engine_error(chain), + }), + } + } else { + Err(EoaExecutorWorkerError::TransactionBuildFailed { + message: "EIP7702 transactions not supported on chain".to_string(), + }) + } + } else { + // Other EIP-1559 error + Err(EoaExecutorWorkerError::RpcError { + message: format!("Failed to estimate EIP-1559 fees: {}", eip1559_error), + inner_error: eip1559_error.to_engine_error(chain), + }) + } + } + } + } + + async fn build_typed_transaction( + &self, + tx_data: &TransactionData, + nonce: u64, + chain: &impl Chain, + ) -> Result { + // Build transaction request from stored data + let mut tx_request = AlloyTransactionRequest::default() + .with_from(tx_data.user_request.from) + .with_value(tx_data.user_request.value) + .with_input(tx_data.user_request.data.clone()) + .with_chain_id(tx_data.user_request.chain_id) + .with_nonce(nonce); + + if let Some(to) = tx_data.user_request.to { + tx_request = tx_request.with_to(to); + } + + if let Some(gas_limit) = tx_data.user_request.gas_limit { + tx_request = tx_request.with_gas_limit(gas_limit); + } + + // Handle gas fees - either from user settings or estimation + tx_request = if let Some(type_data) = 
&tx_data.user_request.transaction_type_data { + // User provided gas settings - respect them first + match type_data { + EoaTransactionTypeData::Eip1559(data) => { + let mut req = tx_request; + if let Some(max_fee) = data.max_fee_per_gas { + req = req.with_max_fee_per_gas(max_fee); + } + if let Some(max_priority) = data.max_priority_fee_per_gas { + req = req.with_max_priority_fee_per_gas(max_priority); + } + + // if either not set, estimate the other one + if req.max_fee_per_gas.is_none() || req.max_priority_fee_per_gas.is_none() { + req = self.estimate_gas_fees(chain, req).await?; + } + + req + } + EoaTransactionTypeData::Legacy(data) => { + if let Some(gas_price) = data.gas_price { + tx_request.with_gas_price(gas_price) + } else { + // User didn't provide gas price, estimate it + self.estimate_gas_fees(chain, tx_request).await? + } + } + EoaTransactionTypeData::Eip7702(data) => { + let mut req = tx_request; + if let Some(authorization_list) = &data.authorization_list { + req = req.with_authorization_list(authorization_list.clone()); + } + if let Some(max_fee) = data.max_fee_per_gas { + req = req.with_max_fee_per_gas(max_fee); + } + if let Some(max_priority) = data.max_priority_fee_per_gas { + req = req.with_max_priority_fee_per_gas(max_priority); + } + + // if either not set, estimate the other one + if req.max_fee_per_gas.is_none() || req.max_priority_fee_per_gas.is_none() { + req = self.estimate_gas_fees(chain, req).await?; + } + + req + } + } + } else { + // No user settings - estimate appropriate fees + self.estimate_gas_fees(chain, tx_request).await? 
+ }; + + // Estimate gas if needed + if tx_request.gas.is_none() { + match chain.provider().estimate_gas(tx_request.clone()).await { + Ok(gas_limit) => { + tx_request = tx_request.with_gas_limit(gas_limit * 110 / 100); // 10% buffer + } + Err(e) => { + // Check if this is a revert + if let RpcError::ErrorResp(error_payload) = &e { + if let Some(revert_data) = error_payload.as_revert_data() { + // This is a revert - the transaction is fundamentally broken + // This should fail the individual transaction, not the worker + return Err(EoaExecutorWorkerError::TransactionSimulationFailed { + message: format!( + "Transaction reverted during gas estimation: {} (revert: {})", + error_payload.message, + hex::encode(&revert_data) + ), + inner_error: e.to_engine_error(chain), + }); + } + } + + // Not a revert - could be RPC issue, this should nack the worker + let engine_error = e.to_engine_error(chain); + return Err(EoaExecutorWorkerError::RpcError { + message: format!("Gas estimation failed: {}", engine_error), + inner_error: engine_error, + }); + } + } + } + + // Build typed transaction + tx_request + .build_typed_tx() + .map_err(|e| EoaExecutorWorkerError::TransactionBuildFailed { + message: format!("Failed to build typed transaction: {:?}", e), + }) + } + + async fn sign_transaction( + &self, + typed_tx: TypedTransaction, + tx_data: &TransactionData, + ) -> Result, EoaExecutorWorkerError> { + let signing_options = EoaSigningOptions { + from: tx_data.user_request.from, + chain_id: Some(tx_data.user_request.chain_id), + }; + + let signature = self + .eoa_signer + .sign_transaction( + signing_options, + typed_tx.clone(), + tx_data.user_request.signing_credential.clone(), + ) + .await + .map_err(|engine_error| EoaExecutorWorkerError::SigningError { + message: format!("Failed to sign transaction: {}", engine_error), + inner_error: engine_error, + })?; + + let signature = signature.parse::().map_err(|e| { + EoaExecutorWorkerError::SignatureParsingFailed { + message: 
format!("Failed to parse signature: {}", e), + } + })?; + + Ok(typed_tx.into_signed(signature)) + } + + async fn build_and_sign_transaction( + &self, + tx_data: &TransactionData, + nonce: u64, + chain: &impl Chain, + ) -> Result, EoaExecutorWorkerError> { + let typed_tx = self.build_typed_transaction(tx_data, nonce, chain).await?; + self.sign_transaction(typed_tx, tx_data).await + } + + fn apply_gas_bump_to_typed_transaction( + &self, + mut typed_tx: TypedTransaction, + bump_multiplier: u32, // e.g., 120 for 20% increase + ) -> TypedTransaction { + match &mut typed_tx { + TypedTransaction::Eip1559(tx) => { + tx.max_fee_per_gas = tx.max_fee_per_gas * bump_multiplier as u128 / 100; + tx.max_priority_fee_per_gas = + tx.max_priority_fee_per_gas * bump_multiplier as u128 / 100; + } + TypedTransaction::Legacy(tx) => { + tx.gas_price = tx.gas_price * bump_multiplier as u128 / 100; + } + TypedTransaction::Eip2930(tx) => { + tx.gas_price = tx.gas_price * bump_multiplier as u128 / 100; + } + TypedTransaction::Eip7702(tx) => { + tx.max_fee_per_gas = tx.max_fee_per_gas * bump_multiplier as u128 / 100; + tx.max_priority_fee_per_gas = + tx.max_priority_fee_per_gas * bump_multiplier as u128 / 100; + } + TypedTransaction::Eip4844(tx) => match tx { + TxEip4844Variant::TxEip4844(tx) => { + tx.max_fee_per_gas = tx.max_fee_per_gas * bump_multiplier as u128 / 100; + tx.max_priority_fee_per_gas = + tx.max_priority_fee_per_gas * bump_multiplier as u128 / 100; + } + TxEip4844Variant::TxEip4844WithSidecar(TxEip4844WithSidecar { tx, .. 
}) => { + tx.max_fee_per_gas = tx.max_fee_per_gas * bump_multiplier as u128 / 100; + tx.max_priority_fee_per_gas = + tx.max_priority_fee_per_gas * bump_multiplier as u128 / 100; + } + }, + } + typed_tx + } +} diff --git a/executors/src/lib.rs b/executors/src/lib.rs index c8de07f..842ee7d 100644 --- a/executors/src/lib.rs +++ b/executors/src/lib.rs @@ -1,4 +1,5 @@ -pub mod external_bundler; pub mod eip7702_executor; -pub mod webhook; +pub mod eoa; +pub mod external_bundler; pub mod transaction_registry; +pub mod webhook; diff --git a/server/Cargo.toml b/server/Cargo.toml index d96d04e..93c0ab0 100644 --- a/server/Cargo.toml +++ b/server/Cargo.toml @@ -4,13 +4,13 @@ version = "0.1.0" edition = "2024" [dependencies] -alloy = { version = "1.0.8", features = ["serde"] } +alloy = { workspace = true, features = ["serde"] } axum = { version = "0.8.4", features = ["macros"] } config = "0.15.11" serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" -vault-sdk = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } -vault-types = { version = "0.1.0", git = "ssh://git@github.com/thirdweb-dev/vault.git", branch = "main" } +vault-sdk = { workspace = true } +vault-types = { workspace = true } engine-core = { path = "../core" } engine-aa-core = { path = "../aa-core" } engine-executors = { path = "../executors" } diff --git a/server/configuration/server_base.yaml b/server/configuration/server_base.yaml index e236348..43f4f62 100644 --- a/server/configuration/server_base.yaml +++ b/server/configuration/server_base.yaml @@ -20,6 +20,7 @@ queue: webhook_workers: 100 external_bundler_send_workers: 100 userop_confirm_workers: 100 + eoa_executor_workers: 100 local_concurrency: 100 polling_interval_ms: 100 lease_duration_seconds: 600 diff --git a/server/src/config.rs b/server/src/config.rs index e9bd8ad..d248764 100644 --- a/server/src/config.rs +++ b/server/src/config.rs @@ -17,6 +17,7 @@ pub struct QueueConfig { pub 
external_bundler_send_workers: usize, pub userop_confirm_workers: usize, + pub eoa_executor_workers: usize, pub execution_namespace: Option, diff --git a/server/src/execution_router/mod.rs b/server/src/execution_router/mod.rs index 6272394..5209c99 100644 --- a/server/src/execution_router/mod.rs +++ b/server/src/execution_router/mod.rs @@ -9,6 +9,7 @@ use engine_core::{ execution_options::{ BaseExecutionOptions, QueuedTransaction, SendTransactionRequest, SpecificExecutionOptions, WebhookOptions, aa::Erc4337ExecutionOptions, eip7702::Eip7702ExecutionOptions, + eoa::EoaExecutionOptions, }, transaction::InnerTransaction, }; @@ -17,6 +18,7 @@ use engine_executors::{ confirm::Eip7702ConfirmationHandler, send::{Eip7702SendHandler, Eip7702SendJobData}, }, + eoa::{EoaExecutorStore, EoaExecutorWorker, EoaExecutorWorkerJobData, EoaTransactionRequest}, external_bundler::{ confirm::UserOpConfirmationHandler, send::{ExternalBundlerSendHandler, ExternalBundlerSendJobData}, @@ -38,6 +40,8 @@ pub struct ExecutionRouter { pub webhook_queue: Arc>, pub external_bundler_send_queue: Arc>>, pub userop_confirm_queue: Arc>>, + pub eoa_executor_queue: Arc>>, + pub eoa_executor_store: Arc, pub eip7702_send_queue: Arc>>, pub eip7702_confirm_queue: Arc>>, pub transaction_registry: Arc, @@ -253,6 +257,31 @@ impl ExecutionRouter { SpecificExecutionOptions::Auto(_auto_execution_options) => { todo!() } + + SpecificExecutionOptions::EOA(ref eoa_execution_options) => { + self.execute_eoa( + &execution_request.execution_options.base, + eoa_execution_options, + &execution_request.webhook_options, + &execution_request.params, + rpc_credentials, + signing_credential, + ) + .await?; + + let queued_transaction = QueuedTransaction { + id: execution_request + .execution_options + .base + .idempotency_key + .clone(), + batch_index: 0, + execution_params: execution_request.execution_options, + transaction_params: execution_request.params, + }; + + Ok(vec![queued_transaction]) + } } } @@ -349,4 +378,84 @@ 
impl ExecutionRouter { Ok(()) } + + async fn execute_eoa( + &self, + base_execution_options: &BaseExecutionOptions, + eoa_execution_options: &EoaExecutionOptions, + webhook_options: &Option>, + transactions: &[InnerTransaction], + rpc_credentials: RpcCredentials, + signing_credential: SigningCredential, + ) -> Result<(), TwmqError> { + if transactions.len() != 1 { + return Err(TwmqError::Runtime { + message: "EOA execution currently supports only single transactions".to_string(), + }); + } + + let transaction = &transactions[0]; + let eoa_transaction_request = EoaTransactionRequest { + transaction_id: base_execution_options.idempotency_key.clone(), + chain_id: base_execution_options.chain_id, + from: eoa_execution_options.from, + to: transaction.to, + value: transaction.value, + data: transaction.data.clone(), + gas_limit: eoa_execution_options.gas_limit, + webhook_options: webhook_options.clone(), + signing_credential, + rpc_credentials, + transaction_type_data: eoa_execution_options.transaction_type_data.clone(), + }; + + // Add transaction to the store + self.eoa_executor_store + .add_transaction(eoa_transaction_request) + .await + .map_err(|e| TwmqError::Runtime { + message: format!("Failed to add transaction to EOA store: {}", e), + })?; + + // Register transaction in registry + self.transaction_registry + .set_transaction_queue(&base_execution_options.idempotency_key, "eoa_executor") + .await + .map_err(|e| TwmqError::Runtime { + message: format!("Failed to register transaction: {}", e), + })?; + + // Ensure an idempotent job exists for this EOA:chain combination + let eoa_job_data = EoaExecutorWorkerJobData { + eoa_address: eoa_execution_options.from, + chain_id: base_execution_options.chain_id, + worker_id: format!( + "eoa_{}_{}", + eoa_execution_options.from, base_execution_options.chain_id + ), + }; + + // Create idempotent job for this EOA:chain - only one will exist + let job_id = format!( + "eoa_{}_{}", + eoa_execution_options.from, 
base_execution_options.chain_id + ); + + self.eoa_executor_queue + .clone() + .job(eoa_job_data) + .with_id(&job_id) + .push() + .await?; + + tracing::debug!( + transaction_id = %base_execution_options.idempotency_key, + eoa = %eoa_execution_options.from, + chain_id = %base_execution_options.chain_id, + queue = "eoa_executor", + "EOA transaction added to store and worker job ensured" + ); + + Ok(()) + } } diff --git a/server/src/main.rs b/server/src/main.rs index 243db04..995317b 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -77,6 +77,8 @@ async fn main() -> anyhow::Result<()> { webhook_queue: queue_manager.webhook_queue.clone(), external_bundler_send_queue: queue_manager.external_bundler_send_queue.clone(), userop_confirm_queue: queue_manager.userop_confirm_queue.clone(), + eoa_executor_queue: queue_manager.eoa_executor_queue.clone(), + eoa_executor_store: queue_manager.eoa_executor_store.clone(), eip7702_send_queue: queue_manager.eip7702_send_queue.clone(), eip7702_confirm_queue: queue_manager.eip7702_confirm_queue.clone(), transaction_registry: queue_manager.transaction_registry.clone(), diff --git a/server/src/queue/manager.rs b/server/src/queue/manager.rs index 4e93d50..04e216e 100644 --- a/server/src/queue/manager.rs +++ b/server/src/queue/manager.rs @@ -5,6 +5,7 @@ use alloy::transports::http::reqwest; use engine_core::error::EngineError; use engine_executors::{ eip7702_executor::{confirm::Eip7702ConfirmationHandler, send::Eip7702SendHandler}, + eoa::{EoaExecutorStore, EoaExecutorWorker}, external_bundler::{ confirm::UserOpConfirmationHandler, deployment::{RedisDeploymentCache, RedisDeploymentLock}, @@ -24,6 +25,8 @@ pub struct QueueManager { pub webhook_queue: Arc>, pub external_bundler_send_queue: Arc>>, pub userop_confirm_queue: Arc>>, + pub eoa_executor_queue: Arc>>, + pub eoa_executor_store: Arc, pub eip7702_send_queue: Arc>>, pub eip7702_confirm_queue: Arc>>, pub transaction_registry: Arc, @@ -41,6 +44,7 @@ const 
USEROP_CONFIRM_QUEUE_NAME: &str = "userop_confirm"; const EIP7702_SEND_QUEUE_NAME: &str = "eip7702_send"; const EIP7702_CONFIRM_QUEUE_NAME: &str = "eip7702_confirm"; const WEBHOOK_QUEUE_NAME: &str = "webhook"; +const EOA_EXECUTOR_QUEUE_NAME: &str = "eoa_executor"; impl QueueManager { pub async fn new( @@ -59,6 +63,12 @@ impl QueueManager { queue_config.execution_namespace.clone(), )); + // Create EOA executor store + let eoa_executor_store = Arc::new(EoaExecutorStore::new( + redis_client.get_connection_manager().await?, + queue_config.execution_namespace.clone(), + )); + // Create deployment cache and lock let deployment_cache = RedisDeploymentCache::new(redis_client.clone()).await?; let deployment_lock = RedisDeploymentLock::new(redis_client.clone()).await?; @@ -68,6 +78,7 @@ impl QueueManager { local_concurrency: queue_config.local_concurrency, polling_interval: Duration::from_millis(queue_config.polling_interval_ms), lease_duration: Duration::from_secs(queue_config.lease_duration_seconds), + idempotency_mode: twmq::IdempotencyMode::Permanent, always_poll: false, max_success: 1000, max_failed: 1000, @@ -89,6 +100,10 @@ impl QueueManager { let mut webhook_queue_opts = base_queue_opts.clone(); webhook_queue_opts.local_concurrency = queue_config.webhook_workers; + let mut eoa_executor_queue_opts = base_queue_opts.clone(); + eoa_executor_queue_opts.idempotency_mode = twmq::IdempotencyMode::Active; + eoa_executor_queue_opts.local_concurrency = queue_config.eoa_executor_workers; + // Create webhook queue let webhook_handler = WebhookJobHandler { http_client: reqwest::Client::new(), @@ -118,6 +133,11 @@ impl QueueManager { EIP7702_CONFIRM_QUEUE_NAME, ); + let eoa_executor_queue_name = get_queue_name_for_namespace( + &queue_config.execution_namespace, + EOA_EXECUTOR_QUEUE_NAME, + ); + let webhook_queue = Queue::builder() .name(webhook_queue_name) .options(webhook_queue_opts) @@ -183,7 +203,7 @@ impl QueueManager { // Create EIP-7702 send queue let eip7702_send_handler = 
Eip7702SendHandler { chain_service: chain_service.clone(), - eoa_signer, + eoa_signer: eoa_signer.clone(), webhook_queue: webhook_queue.clone(), confirm_queue: eip7702_confirm_queue.clone(), transaction_registry: transaction_registry.clone(), @@ -198,10 +218,30 @@ impl QueueManager { .await? .arc(); + // Create EOA executor queue + let eoa_executor_handler = EoaExecutorWorker { + chain_service: chain_service.clone(), + store: eoa_executor_store.clone(), + eoa_signer: eoa_signer.clone(), + max_inflight: 100, + max_recycled_nonces: 50, + }; + + let eoa_executor_queue = Queue::builder() + .name(eoa_executor_queue_name) + .options(eoa_executor_queue_opts) + .handler(eoa_executor_handler) + .redis_client(redis_client.clone()) + .build() + .await? + .arc(); + Ok(Self { webhook_queue, external_bundler_send_queue, userop_confirm_queue, + eoa_executor_queue, + eoa_executor_store, eip7702_send_queue, eip7702_confirm_queue, transaction_registry, @@ -224,6 +264,10 @@ impl QueueManager { tracing::info!("Starting external bundler confirmation worker"); let userop_confirm_worker = self.userop_confirm_queue.work(); + // Start EOA executor workers + tracing::info!("Starting EOA executor worker"); + let eoa_executor_worker = self.eoa_executor_queue.work(); + // Start EIP-7702 send workers tracing::info!("Starting EIP-7702 send worker"); let eip7702_send_worker = self.eip7702_send_queue.work(); @@ -233,10 +277,11 @@ impl QueueManager { let eip7702_confirm_worker = self.eip7702_confirm_queue.work(); tracing::info!( - "Started {} webhook workers, {} send workers, {} confirm workers, {} EIP-7702 send workers, {} EIP-7702 confirm workers", + "Started {} webhook workers, {} send workers, {} confirm workers, {} eoa workers, {} EIP-7702 send workers, {} EIP-7702 confirm workers", queue_config.webhook_workers, queue_config.external_bundler_send_workers, queue_config.userop_confirm_workers, + queue_config.eoa_executor_workers, queue_config.external_bundler_send_workers, // Reusing same config 
for now queue_config.userop_confirm_workers // Reusing same config for now ); @@ -244,6 +289,7 @@ impl QueueManager { ShutdownHandle::with_worker(webhook_worker) .and_worker(external_bundler_send_worker) .and_worker(userop_confirm_worker) + .and_worker(eoa_executor_worker) .and_worker(eip7702_send_worker) .and_worker(eip7702_confirm_worker) } @@ -291,6 +337,14 @@ impl QueueManager { failed: self.userop_confirm_queue.count(JobStatus::Failed).await?, }; + let eoa_executor_stats = QueueStatistics { + pending: self.eoa_executor_queue.count(JobStatus::Pending).await?, + active: self.eoa_executor_queue.count(JobStatus::Active).await?, + delayed: self.eoa_executor_queue.count(JobStatus::Delayed).await?, + success: self.eoa_executor_queue.count(JobStatus::Success).await?, + failed: self.eoa_executor_queue.count(JobStatus::Failed).await?, + }; + let eip7702_send_stats = QueueStatistics { pending: self.eip7702_send_queue.count(JobStatus::Pending).await?, active: self.eip7702_send_queue.count(JobStatus::Active).await?, @@ -311,6 +365,7 @@ impl QueueManager { webhook: webhook_stats, external_bundler_send: send_stats, userop_confirm: confirm_stats, + eoa_executor: eoa_executor_stats, eip7702_send: eip7702_send_stats, eip7702_confirm: eip7702_confirm_stats, }) @@ -322,6 +377,7 @@ pub struct QueueStats { pub webhook: QueueStatistics, pub external_bundler_send: QueueStatistics, pub userop_confirm: QueueStatistics, + pub eoa_executor: QueueStatistics, pub eip7702_send: QueueStatistics, pub eip7702_confirm: QueueStatistics, } diff --git a/thirdweb-core/Cargo.toml b/thirdweb-core/Cargo.toml index d38fb0d..cb12a75 100644 --- a/thirdweb-core/Cargo.toml +++ b/thirdweb-core/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2024" [dependencies] -alloy = { version = "1.0.9", features = [ +alloy = { workspace = true, features = [ "json-abi", "consensus", "dyn-abi", diff --git a/twmq/benches/throughput.rs b/twmq/benches/throughput.rs index f666634..4056fa0 100644 --- 
a/twmq/benches/throughput.rs +++ b/twmq/benches/throughput.rs @@ -10,7 +10,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::runtime::Runtime; use twmq::error::TwmqError; use twmq::job::JobError; -use twmq::{BorrowedJob, UserCancellable}; +use twmq::{BorrowedJob, IdempotencyMode, UserCancellable}; use twmq::{ DurableExecution, Queue, @@ -174,6 +174,7 @@ async fn load_test_throughput( // Optimize queue for high throughput let queue_options = QueueOptions { + idempotency_mode: IdempotencyMode::Active, local_concurrency: 200, // High concurrency polling_interval: Duration::from_millis(10), // Fast polling always_poll: true, // Always poll for max responsiveness diff --git a/twmq/src/job.rs b/twmq/src/job.rs index 43b37d3..afc382b 100644 --- a/twmq/src/job.rs +++ b/twmq/src/job.rs @@ -56,17 +56,20 @@ pub trait ToJobResult { fn map_err_fail(self) -> JobResult; } -impl ToJobResult for Result { +impl ToJobResult for Result +where + ErrorType: Into, +{ fn map_err_nack(self, delay: Option, position: RequeuePosition) -> JobResult { self.map_err(|e| JobError::Nack { - error: e, + error: e.into(), delay, position, }) } fn map_err_fail(self) -> JobResult { - self.map_err(|e| JobError::Fail(e)) + self.map_err(|e| JobError::Fail(e.into())) } } @@ -156,16 +159,16 @@ impl BorrowedJob { pub fn new(job: Job, lease_token: String) -> Self { Self { job, lease_token } } - + // Convenience methods to access job fields pub fn id(&self) -> &str { &self.job.id } - + pub fn data(&self) -> &T { &self.job.data } - + pub fn attempts(&self) -> u32 { self.job.attempts } diff --git a/twmq/src/lib.rs b/twmq/src/lib.rs index e516e1a..be6733c 100644 --- a/twmq/src/lib.rs +++ b/twmq/src/lib.rs @@ -1,6 +1,7 @@ pub mod error; pub mod hooks; pub mod job; +pub mod multilane; pub mod queue; pub mod shutdown; @@ -9,11 +10,12 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use error::TwmqError; use hooks::TransactionContext; +pub use job::BorrowedJob; use job::{ DelayOptions, Job, 
JobError, JobErrorRecord, JobErrorType, JobOptions, JobResult, JobStatus, PushableJob, RequeuePosition, }; -pub use job::BorrowedJob; +pub use multilane::{MultilanePushableJob, MultilaneQueue}; use queue::QueueOptions; use redis::Pipeline; use redis::{AsyncCommands, RedisResult, aio::ConnectionManager}; @@ -22,6 +24,7 @@ use shutdown::WorkerHandle; use tokio::sync::Semaphore; use tokio::time::sleep; +pub use queue::IdempotencyMode; pub use redis; use tracing::Instrument; @@ -72,7 +75,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static { _job: &BorrowedJob, _d: SuccessHookData, _tx: &mut TransactionContext<'_>, - ) -> impl Future + Send + Sync { + ) -> impl Future + Send { std::future::ready(()) } @@ -81,7 +84,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static { _job: &BorrowedJob, _d: NackHookData, _tx: &mut TransactionContext<'_>, - ) -> impl Future + Send + Sync { + ) -> impl Future + Send { std::future::ready(()) } @@ -90,7 +93,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static { _job: &BorrowedJob, _d: FailHookData, _tx: &mut TransactionContext<'_>, - ) -> impl Future + Send + Sync { + ) -> impl Future + Send { std::future::ready(()) } @@ -108,7 +111,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static { _job: &Job>, _d: QueueInternalErrorHookData<'_>, _tx: &mut TransactionContext<'_>, - ) -> impl Future + Send + Sync { + ) -> impl Future + Send { std::future::ready(()) } } @@ -440,14 +443,16 @@ impl Queue { ); } Ok(CancelResult::CancelledImmediately) - }, + } "cancellation_pending" => Ok(CancelResult::CancellationPending), "not_found" => Ok(CancelResult::NotFound), - _ => Err(TwmqError::Runtime { message: format!("Unexpected cancel result: {}", result) }), + _ => Err(TwmqError::Runtime { + message: format!("Unexpected cancel result: {}", result), + }), } } - pub fn work(self: &Arc) -> WorkerHandle { + pub fn work(self: &Arc) -> WorkerHandle> { let (shutdown_tx, mut shutdown_rx) = 
tokio::sync::oneshot::channel::<()>(); // Local semaphore to limit concurrency per instance let semaphore = Arc::new(Semaphore::new(self.options.local_concurrency)); @@ -535,8 +540,8 @@ impl Queue { .await .into_iter() .collect::, _>>() - .map_err(|e| { - TwmqError::Runtime { message: format!("Failed to acquire permits during shutdown: {}", e) } + .map_err(|e| TwmqError::Runtime { + message: format!("Failed to acquire permits during shutdown: {}", e), })?; tracing::info!( @@ -718,7 +723,11 @@ impl Queue { .unwrap() .as_secs(); - let results_from_lua: (Vec<(String, String, String, String, String, String)>, Vec, Vec) = script + let results_from_lua: ( + Vec<(String, String, String, String, String, String)>, + Vec, + Vec, + ) = script .key(self.name()) .key(self.delayed_zset_name()) .key(self.pending_list_name()) @@ -734,7 +743,7 @@ impl Queue { .await?; let (job_results, cancelled_jobs, timed_out_jobs) = results_from_lua; - + // Log individual lease timeouts and cancellations for job_id in &timed_out_jobs { tracing::warn!(job_id = %job_id, "Job lease expired, moved back to pending"); @@ -744,8 +753,14 @@ impl Queue { } let mut jobs = Vec::new(); - for (job_id_str, job_data_t_json, attempts_str, created_at_str, processed_at_str, lease_token) in - job_results + for ( + job_id_str, + job_data_t_json, + attempts_str, + created_at_str, + processed_at_str, + lease_token, + ) in job_results { match serde_json::from_str::(&job_data_t_json) { Ok(data_t) => { @@ -792,7 +807,7 @@ impl Queue { }; let twmq_error: TwmqError = e.into(); - + // Complete job using queue error method with lease token if let Err(e) = queue_clone .complete_job_queue_error(&job, &lease_token, &twmq_error.into()) @@ -833,37 +848,37 @@ impl Queue { Some(job) => { // Create cancellation error using the trait let cancellation_error = H::ErrorData::user_cancelled(); - - // Create transaction pipeline for atomicity + + // Create transaction pipeline for atomicity let mut pipeline = redis::pipe(); 
pipeline.atomic(); - + // Create transaction context with mutable access to pipeline - let mut tx_context = TransactionContext::new( - &mut pipeline, - self.name().to_string(), - ); - + let mut tx_context = + TransactionContext::new(&mut pipeline, self.name().to_string()); + let fail_hook_data = FailHookData { error: &cancellation_error, }; - + // Create a BorrowedJob with a dummy lease token since cancelled jobs don't have active leases let borrowed_job = BorrowedJob::new(job, "cancelled".to_string()); - + // Call fail hook for user cancellation - self.handler.on_fail(&borrowed_job, fail_hook_data, &mut tx_context).await; - + self.handler + .on_fail(&borrowed_job, fail_hook_data, &mut tx_context) + .await; + // Execute the pipeline (just hook commands, job already moved to failed) pipeline.query_async::<()>(&mut self.redis.clone()).await?; - + tracing::info!( job_id = %job_id, "Successfully processed job cancellation hooks" ); - + Ok(()) - }, + } None => { tracing::warn!( job_id = %job_id, @@ -890,7 +905,7 @@ impl Queue { // Delete the lease key to consume it pipeline.del(&lease_key); - // Add job completion operations + // Add job completion operations pipeline .hdel(self.active_hash_name(), &job.job.id) .lpush(self.success_list_name(), &job.job.id) @@ -900,6 +915,11 @@ impl Queue { let result_json = serde_json::to_string(result)?; pipeline.hset(self.job_result_hash_name(), &job.job.id, result_json); + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == queue::IdempotencyMode::Active { + pipeline.srem(self.dedupe_set_name(), &job.job.id); + } + Ok(()) } @@ -1048,6 +1068,11 @@ impl Queue { let error_json = serde_json::to_string(&error_record)?; pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == queue::IdempotencyMode::Active { + pipeline.srem(self.dedupe_set_name(), 
&job.job.id); + } + Ok(()) } @@ -1109,21 +1134,31 @@ impl Queue { match &result { Ok(output) => { let success_hook_data = SuccessHookData { result: output }; - self.handler.on_success(job, success_hook_data, &mut tx_context).await; + self.handler + .on_success(job, success_hook_data, &mut tx_context) + .await; self.add_success_operations(job, output, &mut hook_pipeline)?; } - Err(JobError::Nack { error, delay, position }) => { + Err(JobError::Nack { + error, + delay, + position, + }) => { let nack_hook_data = NackHookData { error, delay: *delay, position: *position, }; - self.handler.on_nack(job, nack_hook_data, &mut tx_context).await; + self.handler + .on_nack(job, nack_hook_data, &mut tx_context) + .await; self.add_nack_operations(job, error, *delay, *position, &mut hook_pipeline)?; } Err(JobError::Fail(error)) => { let fail_hook_data = FailHookData { error }; - self.handler.on_fail(job, fail_hook_data, &mut tx_context).await; + self.handler + .on_fail(job, fail_hook_data, &mut tx_context) + .await; self.add_fail_operations(job, error, &mut hook_pipeline)?; } } @@ -1143,9 +1178,7 @@ impl Queue { // Check if lease exists - if not, job was cancelled or timed out let lease_exists: bool = conn.exists(&lease_key).await?; if !lease_exists { - redis::cmd("UNWATCH") - .query_async::<()>(&mut conn) - .await?; + redis::cmd("UNWATCH").query_async::<()>(&mut conn).await?; tracing::warn!(job_id = %job.job.id, "Lease no longer exists, job was cancelled or timed out"); return Ok(()); } @@ -1155,7 +1188,10 @@ impl Queue { atomic_pipeline.atomic(); // Execute atomically with WATCH/MULTI/EXEC - match atomic_pipeline.query_async::>(&mut conn).await { + match atomic_pipeline + .query_async::>(&mut conn) + .await + { Ok(_) => { // Success! 
Now run post-completion methods match &result { @@ -1188,9 +1224,13 @@ impl Queue { let mut hook_pipeline = redis::pipe(); let mut tx_context = TransactionContext::new(&mut hook_pipeline, self.name().to_string()); - let twmq_error = TwmqError::Runtime { message: "Job processing failed with user error".to_string() }; + let twmq_error = TwmqError::Runtime { + message: "Job processing failed with user error".to_string(), + }; let queue_error_hook_data = QueueInternalErrorHookData { error: &twmq_error }; - self.handler.on_queue_error(job, queue_error_hook_data, &mut tx_context).await; + self.handler + .on_queue_error(job, queue_error_hook_data, &mut tx_context) + .await; // Add fail operations to pipeline let now = SystemTime::now() @@ -1220,6 +1260,11 @@ impl Queue { let error_json = serde_json::to_string(&error_record)?; hook_pipeline.lpush(self.job_errors_list_name(&job.id), error_json); + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == queue::IdempotencyMode::Active { + hook_pipeline.srem(self.dedupe_set_name(), &job.id); + } + // 2. Use pipeline in unlimited retry loop with lease check loop { let mut conn = self.redis.clone(); @@ -1233,9 +1278,7 @@ impl Queue { // Check if lease exists - if not, job was cancelled or timed out let lease_exists: bool = conn.exists(&lease_key).await?; if !lease_exists { - redis::cmd("UNWATCH") - .query_async::<()>(&mut conn) - .await?; + redis::cmd("UNWATCH").query_async::<()>(&mut conn).await?; tracing::warn!(job_id = %job.id, "Lease no longer exists, job was cancelled or timed out"); return Ok(()); } @@ -1245,7 +1288,10 @@ impl Queue { atomic_pipeline.atomic(); // Execute atomically with WATCH/MULTI/EXEC - match atomic_pipeline.query_async::>(&mut conn).await { + match atomic_pipeline + .query_async::>(&mut conn) + .await + { Ok(_) => { // Success! 
Run post-completion self.post_fail_completion().await?; @@ -1260,4 +1306,20 @@ impl Queue { } } } + + pub async fn remove_from_dedupe_set(&self, job_id: &str) -> Result<(), TwmqError> { + self.redis + .clone() + .srem::<&str, &str, ()>(&self.dedupe_set_name(), job_id) + .await?; + Ok(()) + } + + pub async fn empty_dedupe_set(&self) -> Result<(), TwmqError> { + self.redis + .clone() + .del::<&str, ()>(&self.dedupe_set_name()) + .await?; + Ok(()) + } } diff --git a/twmq/src/multilane.rs b/twmq/src/multilane.rs new file mode 100644 index 0000000..bb9b359 --- /dev/null +++ b/twmq/src/multilane.rs @@ -0,0 +1,1363 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use redis::{AsyncCommands, Pipeline, RedisResult, aio::ConnectionManager}; +use tokio::sync::Semaphore; +use tokio::time::sleep; +use tracing::Instrument; + +use crate::{ + CancelResult, DurableExecution, FailHookData, NackHookData, QueueInternalErrorHookData, + SuccessHookData, UserCancellable, + error::TwmqError, + hooks::TransactionContext, + job::{ + BorrowedJob, DelayOptions, Job, JobError, JobErrorRecord, JobErrorType, JobOptions, + JobResult, JobStatus, RequeuePosition, + }, + queue::QueueOptions, + shutdown::WorkerHandle, +}; + +/// A multilane queue that provides fair load balancing across multiple lanes +/// while maintaining the same reliability guarantees as the single-lane queue. 
+pub struct MultilaneQueue +where + H: DurableExecution, +{ + pub redis: ConnectionManager, + handler: Arc, + options: QueueOptions, + /// Unique identifier for this multilane queue instance + queue_id: String, +} + +/// Represents a job that can be pushed to a specific lane +pub struct MultilanePushableJob +where + H: DurableExecution, +{ + options: JobOptions, + queue: Arc>, + lane_id: String, +} + +impl MultilaneQueue { + pub async fn new( + redis_url: &str, + queue_id: &str, + options: Option, + handler: H, + ) -> Result { + let client = redis::Client::open(redis_url)?; + let redis = client.get_connection_manager().await?; + + let queue = Self { + redis, + queue_id: queue_id.to_string(), + options: options.unwrap_or_default(), + handler: Arc::new(handler), + }; + + Ok(queue) + } + + pub fn arc(self) -> Arc { + Arc::new(self) + } + + /// Create a job for a specific lane + pub fn job_for_lane( + self: Arc, + lane_id: &str, + data: H::JobData, + ) -> MultilanePushableJob { + MultilanePushableJob { + options: JobOptions::new(data), + queue: self, + lane_id: lane_id.to_string(), + } + } + + pub fn queue_id(&self) -> &str { + &self.queue_id + } + + // Redis key naming methods with proper multilane namespacing + pub fn lanes_zset_name(&self) -> String { + format!("twmq_multilane:{}:lanes", self.queue_id) + } + + pub fn lane_pending_list_name(&self, lane_id: &str) -> String { + format!("twmq_multilane:{}:lane:{}:pending", self.queue_id, lane_id) + } + + pub fn lane_delayed_zset_name(&self, lane_id: &str) -> String { + format!("twmq_multilane:{}:lane:{}:delayed", self.queue_id, lane_id) + } + + pub fn lane_active_hash_name(&self, lane_id: &str) -> String { + format!("twmq_multilane:{}:lane:{}:active", self.queue_id, lane_id) + } + + pub fn success_list_name(&self) -> String { + format!("twmq_multilane:{}:success", self.queue_id) + } + + pub fn failed_list_name(&self) -> String { + format!("twmq_multilane:{}:failed", self.queue_id) + } + + pub fn 
job_data_hash_name(&self) -> String { + format!("twmq_multilane:{}:jobs:data", self.queue_id) + } + + pub fn job_meta_hash_name(&self, job_id: &str) -> String { + format!("twmq_multilane:{}:job:{}:meta", self.queue_id, job_id) + } + + pub fn job_errors_list_name(&self, job_id: &str) -> String { + format!("twmq_multilane:{}:job:{}:errors", self.queue_id, job_id) + } + + pub fn job_result_hash_name(&self) -> String { + format!("twmq_multilane:{}:jobs:result", self.queue_id) + } + + pub fn dedupe_set_name(&self) -> String { + format!("twmq_multilane:{}:dedup", self.queue_id) + } + + pub fn pending_cancellation_set_name(&self) -> String { + format!("twmq_multilane:{}:pending_cancellations", self.queue_id) + } + + pub fn lease_key_name(&self, job_id: &str, lease_token: &str) -> String { + format!( + "twmq_multilane:{}:job:{}:lease:{}", + self.queue_id, job_id, lease_token + ) + } + + /// Push a job to a specific lane + pub async fn push_to_lane( + &self, + lane_id: &str, + job_options: JobOptions, + ) -> Result, TwmqError> { + let script = redis::Script::new( + r#" + local queue_id = ARGV[1] + local lane_id = ARGV[2] + local job_id = ARGV[3] + local job_data = ARGV[4] + local now = ARGV[5] + local delay = ARGV[6] + local reentry_position = ARGV[7] -- "first" or "last" + + local lanes_zset_name = KEYS[1] + local lane_delayed_zset_name = KEYS[2] + local lane_pending_list_name = KEYS[3] + local job_data_hash_name = KEYS[4] + local job_meta_hash_name = KEYS[5] + local dedupe_set_name = KEYS[6] + + -- Check if job already exists in any queue + if redis.call('SISMEMBER', dedupe_set_name, job_id) == 1 then + -- Job with this ID already exists, skip + return { 0, job_id } + end + + -- Store job data + redis.call('HSET', job_data_hash_name, job_id, job_data) + + -- Store job metadata as a hash + redis.call('HSET', job_meta_hash_name, 'created_at', now) + redis.call('HSET', job_meta_hash_name, 'attempts', 0) + redis.call('HSET', job_meta_hash_name, 'lane_id', lane_id) + + -- Add 
to deduplication set + redis.call('SADD', dedupe_set_name, job_id) + + -- Add lane to lanes zset if not exists (score 0 means never processed) + redis.call('ZADD', lanes_zset_name, 'NX', 0, lane_id) + + -- Add to appropriate queue based on delay + if tonumber(delay) > 0 then + local process_at = now + tonumber(delay) + -- Store position information for this delayed job + redis.call('HSET', job_meta_hash_name, 'reentry_position', reentry_position) + redis.call('ZADD', lane_delayed_zset_name, process_at, job_id) + else + -- Non-delayed job always goes to end of pending + redis.call('RPUSH', lane_pending_list_name, job_id) + end + + return { 1, job_id } + "#, + ); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let job = Job { + id: job_options.id.clone(), + data: job_options.data, + attempts: 0, + created_at: now, + processed_at: None, + finished_at: None, + }; + + let job_data = serde_json::to_string(&job.data)?; + + let delay = job_options.delay.unwrap_or(DelayOptions { + delay: Duration::ZERO, + position: RequeuePosition::Last, + }); + + let delay_secs = delay.delay.as_secs(); + let position_string = delay.position.to_string(); + + let _result: (i32, String) = script + .key(self.lanes_zset_name()) + .key(self.lane_delayed_zset_name(lane_id)) + .key(self.lane_pending_list_name(lane_id)) + .key(self.job_data_hash_name()) + .key(self.job_meta_hash_name(&job.id)) + .key(self.dedupe_set_name()) + .arg(&self.queue_id) + .arg(lane_id) + .arg(&job_options.id) + .arg(job_data) + .arg(now) + .arg(delay_secs) + .arg(position_string) + .invoke_async(&mut self.redis.clone()) + .await?; + + Ok(job) + } + + /// Get job by ID (works across all lanes) + pub async fn get_job(&self, job_id: &str) -> Result>, TwmqError> { + let mut conn = self.redis.clone(); + let job_data_t_json: Option = conn.hget(self.job_data_hash_name(), job_id).await?; + + if let Some(data_json) = job_data_t_json { + let data_t: H::JobData = 
serde_json::from_str(&data_json)?; + + // Fetch metadata + let meta_map: HashMap = + conn.hgetall(self.job_meta_hash_name(job_id)).await?; + + let attempts: u32 = meta_map + .get("attempts") + .and_then(|s| s.parse().ok()) + .unwrap_or(0); + let created_at: u64 = meta_map + .get("created_at") + .and_then(|s| s.parse().ok()) + .unwrap_or(0); + let processed_at: Option = + meta_map.get("processed_at").and_then(|s| s.parse().ok()); + let finished_at: Option = meta_map.get("finished_at").and_then(|s| s.parse().ok()); + + Ok(Some(Job { + id: job_id.to_string(), + data: data_t, + attempts, + created_at, + processed_at, + finished_at, + })) + } else { + Ok(None) + } + } + + /// Count jobs by status across all lanes or for a specific lane + pub async fn count( + &self, + status: JobStatus, + lane_id: Option<&str>, + ) -> Result { + let mut conn = self.redis.clone(); + + let count = match status { + JobStatus::Pending => { + if let Some(lane) = lane_id { + let count: usize = conn.llen(self.lane_pending_list_name(lane)).await?; + count + } else { + // Sum across all active lanes + let lanes: Vec = conn.zrange(self.lanes_zset_name(), 0, -1).await?; + let mut total = 0; + for lane in lanes { + let count: usize = conn.llen(self.lane_pending_list_name(&lane)).await?; + total += count; + } + total + } + } + JobStatus::Active => { + if let Some(lane) = lane_id { + let count: usize = conn.hlen(self.lane_active_hash_name(lane)).await?; + count + } else { + // Sum across all active lanes + let lanes: Vec = conn.zrange(self.lanes_zset_name(), 0, -1).await?; + let mut total = 0; + for lane in lanes { + let count: usize = conn.hlen(self.lane_active_hash_name(&lane)).await?; + total += count; + } + total + } + } + JobStatus::Delayed => { + if let Some(lane) = lane_id { + let count: usize = conn.zcard(self.lane_delayed_zset_name(lane)).await?; + count + } else { + // Sum across all active lanes + let lanes: Vec = conn.zrange(self.lanes_zset_name(), 0, -1).await?; + let mut total = 0; + 
for lane in lanes { + let count: usize = conn.zcard(self.lane_delayed_zset_name(&lane)).await?; + total += count; + } + total + } + } + JobStatus::Success => { + let count: usize = conn.llen(self.success_list_name()).await?; + count + } + JobStatus::Failed => { + let count: usize = conn.llen(self.failed_list_name()).await?; + count + } + }; + + Ok(count) + } + + pub async fn lanes_count(&self) -> Result { + let mut conn = self.redis.clone(); + let count: usize = conn.zcard(self.lanes_zset_name()).await?; + Ok(count) + } + + /// Cancel a job by ID (works across all lanes) + pub async fn cancel_job(&self, job_id: &str) -> Result { + let script = redis::Script::new( + r#" + local queue_id = ARGV[1] + local job_id = ARGV[2] + local now = ARGV[3] + + local lanes_zset = KEYS[1] + local failed_list = KEYS[2] + local pending_cancellation_set = KEYS[3] + local job_meta_hash = KEYS[4] + local job_data_hash = KEYS[5] + + -- Get the lane for this job + local lane_id = redis.call('HGET', job_meta_hash, 'lane_id') + if not lane_id then + return "not_found" + end + + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + local lane_delayed_zset = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':delayed' + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. 
':active' + + -- Try to remove from pending queue + if redis.call('LREM', lane_pending_list, 0, job_id) > 0 then + -- Move to failed state with cancellation + redis.call('LPUSH', failed_list, job_id) + redis.call('HSET', job_meta_hash, 'finished_at', now) + return "cancelled_immediately" + end + + -- Try to remove from delayed queue + if redis.call('ZREM', lane_delayed_zset, job_id) > 0 then + -- Move to failed state with cancellation + redis.call('LPUSH', failed_list, job_id) + redis.call('HSET', job_meta_hash, 'finished_at', now) + return "cancelled_immediately" + end + + -- Check if job is active + if redis.call('HEXISTS', lane_active_hash, job_id) == 1 then + -- Add to pending cancellations set + redis.call('SADD', pending_cancellation_set, job_id) + return "cancellation_pending" + end + + return "not_found" + "#, + ); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let result: String = script + .key(self.lanes_zset_name()) + .key(self.failed_list_name()) + .key(self.pending_cancellation_set_name()) + .key(self.job_meta_hash_name(job_id)) + .key(self.job_data_hash_name()) + .arg(&self.queue_id) + .arg(job_id) + .arg(now) + .invoke_async(&mut self.redis.clone()) + .await?; + + match result.as_str() { + "cancelled_immediately" => { + if let Err(e) = self.process_cancelled_job(job_id).await { + tracing::error!( + job_id = %job_id, + error = ?e, + "Failed to process immediately cancelled job" + ); + } + Ok(CancelResult::CancelledImmediately) + } + "cancellation_pending" => Ok(CancelResult::CancellationPending), + "not_found" => Ok(CancelResult::NotFound), + _ => Err(TwmqError::Runtime { + message: format!("Unexpected cancel result: {}", result), + }), + } + } + + /// Start the multilane worker + pub fn work(self: &Arc) -> WorkerHandle> { + let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>(); + let semaphore = Arc::new(Semaphore::new(self.options.local_concurrency)); + let handler = 
self.handler.clone(); + let outer_queue_clone = self.clone(); + + let join_handle = tokio::spawn(async move { + let mut interval = tokio::time::interval(outer_queue_clone.options.polling_interval); + let handler_clone = handler.clone(); + let always_poll = outer_queue_clone.options.always_poll; + + tracing::info!( + "Multilane worker started for queue: {}", + outer_queue_clone.queue_id() + ); + + loop { + tokio::select! { + _ = &mut shutdown_rx => { + tracing::info!("Shutdown signal received for multilane queue: {}", outer_queue_clone.queue_id()); + break; + } + + _ = interval.tick() => { + let queue_clone = outer_queue_clone.clone(); + let available_permits = semaphore.available_permits(); + + if available_permits == 0 && !always_poll { + tracing::trace!("No permits available, waiting..."); + continue; + } + + tracing::trace!("Available permits: {}", available_permits); + + match queue_clone.pop_batch_jobs(available_permits).await { + Ok(jobs) => { + tracing::trace!("Got {} jobs across lanes", jobs.len()); + + for (lane_id, job) in jobs { + let permit = semaphore.clone().acquire_owned().await.unwrap(); + let queue_clone = queue_clone.clone(); + let job_id = job.id().to_string(); + let handler_clone = handler_clone.clone(); + + tokio::spawn(async move { + let result = handler_clone.process(&job).await; + + if let Err(e) = queue_clone.complete_job(&job, result).await { + tracing::error!( + "Failed to complete job {} handling: {:?}", + job.id(), + e + ); + } + + drop(permit); + }.instrument(tracing::info_span!("twmq_multilane_worker", job_id, lane_id))); + } + } + Err(e) => { + tracing::error!("Failed to pop batch jobs: {:?}", e); + sleep(Duration::from_millis(1000)).await; + } + }; + } + } + } + + // Graceful shutdown + tracing::info!( + "Waiting for {} active jobs to complete for multilane queue: {}", + outer_queue_clone.options.local_concurrency - semaphore.available_permits(), + outer_queue_clone.queue_id() + ); + + let _permits: Vec<_> = 
(0..outer_queue_clone.options.local_concurrency) + .map(|_| semaphore.clone().acquire_owned()) + .collect::>() + .await + .into_iter() + .collect::, _>>() + .map_err(|e| TwmqError::Runtime { + message: format!("Failed to acquire permits during shutdown: {}", e), + })?; + + tracing::info!( + "All jobs completed, multilane worker shutdown complete for queue: {}", + outer_queue_clone.queue_id() + ); + Ok(()) + }); + + WorkerHandle { + join_handle, + shutdown_tx, + queue: self.clone(), + } + } + + /// Pop jobs from multiple lanes in a fair round-robin manner with full atomicity + pub async fn pop_batch_jobs( + self: &Arc, + batch_size: usize, + ) -> RedisResult)>> { + let script = redis::Script::new( + r#" + local queue_id = ARGV[1] + local now = tonumber(ARGV[2]) + local batch_size = tonumber(ARGV[3]) + local lease_seconds = tonumber(ARGV[4]) + + local lanes_zset_name = KEYS[1] + local job_data_hash_name = KEYS[2] + local pending_cancellation_set = KEYS[3] + local failed_list_name = KEYS[4] + local success_list_name = KEYS[5] + + local result_jobs = {} + local timed_out_jobs = {} + local cancelled_jobs = {} + + -- Helper function to cleanup expired leases for a specific lane + local function cleanup_lane_leases(lane_id) + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':active' + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + + local active_jobs = redis.call('HGETALL', lane_active_hash) + + for i = 1, #active_jobs, 2 do + local job_id = active_jobs[i] + local attempts = active_jobs[i + 1] + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':meta' + + local current_lease_token = redis.call('HGET', job_meta_hash, 'lease_token') + + if current_lease_token then + local lease_key = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':lease:' .. 
current_lease_token + local lease_exists = redis.call('EXISTS', lease_key) + + if lease_exists == 0 then + redis.call('HINCRBY', job_meta_hash, 'attempts', 1) + redis.call('HDEL', job_meta_hash, 'lease_token') + redis.call('HDEL', lane_active_hash, job_id) + redis.call('LPUSH', lane_pending_list, job_id) + table.insert(timed_out_jobs, {lane_id, job_id}) + end + else + redis.call('HINCRBY', job_meta_hash, 'attempts', 1) + redis.call('HDEL', lane_active_hash, job_id) + redis.call('LPUSH', lane_pending_list, job_id) + table.insert(timed_out_jobs, {lane_id, job_id}) + end + end + end + + -- Helper function to move delayed jobs to pending for a specific lane + local function process_delayed_jobs(lane_id) + local lane_delayed_zset = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':delayed' + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + + local delayed_jobs = redis.call('ZRANGEBYSCORE', lane_delayed_zset, 0, now) + for i, job_id in ipairs(delayed_jobs) do + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':meta' + local reentry_position = redis.call('HGET', job_meta_hash, 'reentry_position') or 'last' + + redis.call('ZREM', lane_delayed_zset, job_id) + redis.call('HDEL', job_meta_hash, 'reentry_position') + + if reentry_position == 'first' then + redis.call('LPUSH', lane_pending_list, job_id) + else + redis.call('RPUSH', lane_pending_list, job_id) + end + end + end + + -- Helper function to pop one job from a lane + local function pop_job_from_lane(lane_id) + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. 
':active' + + local job_id = redis.call('RPOP', lane_pending_list) + if not job_id then + return nil + end + + local job_data = redis.call('HGET', job_data_hash_name, job_id) + if not job_data then + return nil + end + + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':meta' + redis.call('HSET', job_meta_hash, 'processed_at', now) + local created_at = redis.call('HGET', job_meta_hash, 'created_at') or now + local attempts = redis.call('HINCRBY', job_meta_hash, 'attempts', 1) + + local lease_token = now .. '_' .. job_id .. '_' .. attempts + local lease_key = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':lease:' .. lease_token + + redis.call('SET', lease_key, '1') + redis.call('EXPIRE', lease_key, lease_seconds) + redis.call('HSET', job_meta_hash, 'lease_token', lease_token) + redis.call('HSET', lane_active_hash, job_id, attempts) + + return {job_id, job_data, tostring(attempts), tostring(created_at), tostring(now), lease_token} + end + + -- Step 1: Process pending cancellations first + local cancel_requests = redis.call('SMEMBERS', pending_cancellation_set) + + for i, job_id in ipairs(cancel_requests) do + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. job_id .. ':meta' + local lane_id = redis.call('HGET', job_meta_hash, 'lane_id') + + if lane_id then + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. 
':active' + + if redis.call('HEXISTS', lane_active_hash, job_id) == 1 then + -- Still processing, keep in cancellation set + else + -- Job finished processing, check outcome + local success_count = redis.call('LREM', success_list_name, 0, job_id) + if success_count > 0 then + -- Job succeeded, add it back to success list + redis.call('LPUSH', success_list_name, job_id) + else + redis.call('LPUSH', failed_list_name, job_id) + redis.call('HSET', job_meta_hash, 'finished_at', now) + table.insert(cancelled_jobs, {lane_id, job_id}) + end + redis.call('SREM', pending_cancellation_set, job_id) + end + end + end + + -- Step 2: Efficient lane processing + local jobs_popped = 0 + local lanes_with_scores = redis.call('ZRANGE', lanes_zset_name, 0, -1, 'WITHSCORES') + local total_lanes = #lanes_with_scores / 2 + + if total_lanes == 0 then + return {result_jobs, cancelled_jobs, timed_out_jobs} + end + + local lane_index = 1 + local empty_lanes_count = 0 + + while jobs_popped < batch_size and empty_lanes_count < total_lanes do + local lane_id = lanes_with_scores[lane_index * 2 - 1] + + -- Skip if we've already marked this lane as empty + if lane_id == nil then + lane_index = lane_index + 1 + if lane_index > total_lanes then + lane_index = 1 + end + else + local last_score = tonumber(lanes_with_scores[lane_index * 2]) + + -- Only cleanup if not visited this batch (score != now) + if last_score ~= now then + cleanup_lane_leases(lane_id) + process_delayed_jobs(lane_id) + redis.call('ZADD', lanes_zset_name, now, lane_id) + lanes_with_scores[lane_index * 2] = tostring(now) + end + + -- Try to pop a job from this lane + local job_result = pop_job_from_lane(lane_id) + + if job_result then + table.insert(result_jobs, {lane_id, job_result[1], job_result[2], job_result[3], job_result[4], job_result[5], job_result[6]}) + jobs_popped = jobs_popped + 1 + else + -- Lane is empty, mark it and count it + lanes_with_scores[lane_index * 2 - 1] = nil + lanes_with_scores[lane_index * 2] = nil + 
empty_lanes_count = empty_lanes_count + 1 + + -- Check if lane should be removed from Redis + local lane_pending_list = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':pending' + local lane_delayed_zset = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':delayed' + local lane_active_hash = 'twmq_multilane:' .. queue_id .. ':lane:' .. lane_id .. ':active' + + local pending_count = redis.call('LLEN', lane_pending_list) + local delayed_count = redis.call('ZCARD', lane_delayed_zset) + local active_count = redis.call('HLEN', lane_active_hash) + + if pending_count == 0 and delayed_count == 0 and active_count == 0 then + redis.call('ZREM', lanes_zset_name, lane_id) + end + end + + -- Move to next lane + lane_index = lane_index + 1 + if lane_index > total_lanes then + lane_index = 1 + end + end + end + + return {result_jobs, cancelled_jobs, timed_out_jobs} + "#, + ); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let results_from_lua: ( + Vec<(String, String, String, String, String, String, String)>, + Vec<(String, String)>, + Vec<(String, String)>, + ) = script + .key(self.lanes_zset_name()) + .key(self.job_data_hash_name()) + .key(self.pending_cancellation_set_name()) + .key(self.failed_list_name()) + .key(self.success_list_name()) + .arg(&self.queue_id) + .arg(now) + .arg(batch_size) + .arg(self.options.lease_duration.as_secs()) + .invoke_async(&mut self.redis.clone()) + .await?; + + let (job_results, cancelled_jobs, timed_out_jobs) = results_from_lua; + + // Log lease timeouts and cancellations with lane context + for (lane_id, job_id) in &timed_out_jobs { + tracing::warn!(job_id = %job_id, lane_id = %lane_id, "Job lease expired, moved back to pending"); + } + for (lane_id, job_id) in &cancelled_jobs { + tracing::info!(job_id = %job_id, lane_id = %lane_id, "Job cancelled by user request"); + } + + let mut jobs = Vec::new(); + for ( + lane_id, + job_id_str, + job_data_t_json, + attempts_str, + 
created_at_str, + processed_at_str, + lease_token, + ) in job_results + { + match serde_json::from_str::(&job_data_t_json) { + Ok(data_t) => { + let attempts: u32 = attempts_str.parse().unwrap_or(1); + let created_at: u64 = created_at_str.parse().unwrap_or(now); + let processed_at: u64 = processed_at_str.parse().unwrap_or(now); + + let job = Job { + id: job_id_str, + data: data_t, + attempts, + created_at, + processed_at: Some(processed_at), + finished_at: None, + }; + + jobs.push((lane_id, BorrowedJob::new(job, lease_token))); + } + Err(e) => { + tracing::error!( + job_id = job_id_str, + lane_id = lane_id, + error = ?e, + "Failed to deserialize job data. Spawning task to move job to failed state.", + ); + + let queue_clone = self.clone(); + tokio::spawn(async move { + let mut pipeline = redis::pipe(); + pipeline.atomic(); + + let mut _tx_context = TransactionContext::new( + &mut pipeline, + queue_clone.queue_id().to_string(), + ); + + let job: Job> = Job { + id: job_id_str.to_string(), + data: None, + attempts: attempts_str.parse().unwrap_or(1), + created_at: created_at_str.parse().unwrap_or(now), + processed_at: processed_at_str.parse().ok(), + finished_at: Some(now), + }; + + let twmq_error: TwmqError = e.into(); + + if let Err(e) = queue_clone + .complete_job_queue_error(&job, &lease_token, &twmq_error.into()) + .await + { + tracing::error!( + job_id = job.id, + lane_id = lane_id, + error = ?e, + "Failed to complete job fail handling successfully", + ); + } + }); + } + } + } + + // Process cancelled jobs through hook system + for (lane_id, job_id) in cancelled_jobs { + let queue_clone = self.clone(); + tokio::spawn(async move { + if let Err(e) = queue_clone.process_cancelled_job(&job_id).await { + tracing::error!( + job_id = %job_id, + lane_id = %lane_id, + error = ?e, + "Failed to process cancelled job" + ); + } + }); + } + + Ok(jobs) + } + + /// Process a cancelled job through the hook system with user cancellation error + async fn 
process_cancelled_job(&self, job_id: &str) -> Result<(), TwmqError> { + match self.get_job(job_id).await? { + Some(job) => { + let cancellation_error = H::ErrorData::user_cancelled(); + + let mut pipeline = redis::pipe(); + pipeline.atomic(); + + let mut tx_context = + TransactionContext::new(&mut pipeline, self.queue_id().to_string()); + + let fail_hook_data = FailHookData { + error: &cancellation_error, + }; + + let borrowed_job = BorrowedJob::new(job, "cancelled".to_string()); + self.handler + .on_fail(&borrowed_job, fail_hook_data, &mut tx_context) + .await; + + pipeline.query_async::<()>(&mut self.redis.clone()).await?; + + tracing::info!( + job_id = %job_id, + "Successfully processed job cancellation hooks" + ); + + Ok(()) + } + None => { + tracing::warn!( + job_id = %job_id, + "Cancelled job not found when trying to process hooks" + ); + Ok(()) + } + } + } + + // Job completion methods (same as single-lane queue but with multilane naming) + fn add_success_operations( + &self, + job: &BorrowedJob, + result: &H::Output, + pipeline: &mut Pipeline, + ) -> Result<(), TwmqError> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + + pipeline.del(&lease_key); + + // Get lane_id from job metadata to remove from correct lane active hash + let job_meta_hash = self.job_meta_hash_name(&job.job.id); + + // We need to get lane_id first, then remove from that lane's active hash + // This requires a separate Redis call before the pipeline, but ensures atomicity within the pipeline + pipeline + .lpush(self.success_list_name(), &job.job.id) + .hset(&job_meta_hash, "finished_at", now) + .hdel(&job_meta_hash, "lease_token"); + + let result_json = serde_json::to_string(result)?; + pipeline.hset(self.job_result_hash_name(), &job.job.id, result_json); + + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == 
crate::queue::IdempotencyMode::Active { + pipeline.srem(self.dedupe_set_name(), &job.job.id); + } + + Ok(()) + } + + async fn post_success_completion(&self) -> Result<(), TwmqError> { + let trim_script = redis::Script::new( + r#" + local queue_id = KEYS[1] + local list_name = KEYS[2] + local job_data_hash = KEYS[3] + local results_hash = KEYS[4] + local dedupe_set_name = KEYS[5] + + local max_len = tonumber(ARGV[1]) + + local job_ids_to_delete = redis.call('LRANGE', list_name, max_len, -1) + + if #job_ids_to_delete > 0 then + for _, j_id in ipairs(job_ids_to_delete) do + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. j_id .. ':meta' + local errors_list_name = 'twmq_multilane:' .. queue_id .. ':job:' .. j_id .. ':errors' + + redis.call('SREM', dedupe_set_name, j_id) + redis.call('HDEL', job_data_hash, j_id) + redis.call('DEL', job_meta_hash) + redis.call('HDEL', results_hash, j_id) + redis.call('DEL', errors_list_name) + end + redis.call('LTRIM', list_name, 0, max_len - 1) + end + return #job_ids_to_delete + "#, + ); + + let trimmed_count: usize = trim_script + .key(self.queue_id()) + .key(self.success_list_name()) + .key(self.job_data_hash_name()) + .key(self.job_result_hash_name()) + .key(self.dedupe_set_name()) + .arg(self.options.max_success) + .invoke_async(&mut self.redis.clone()) + .await?; + + if trimmed_count > 0 { + tracing::info!("Pruned {} successful jobs", trimmed_count); + } + + Ok(()) + } + + fn add_nack_operations( + &self, + job: &BorrowedJob, + error: &H::ErrorData, + delay: Option, + position: RequeuePosition, + pipeline: &mut Pipeline, + ) -> Result<(), TwmqError> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + let job_meta_hash = self.job_meta_hash_name(&job.job.id); + + pipeline.del(&lease_key); + pipeline.hdel(&job_meta_hash, "lease_token"); + + let error_record = JobErrorRecord { + attempt: job.job.attempts, + 
error, + details: JobErrorType::nack(delay, position), + created_at: now, + }; + + let error_json = serde_json::to_string(&error_record)?; + pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); + + // Note: The actual requeuing logic needs to be handled by a separate operation + // since we need the lane_id from metadata. This will be done in the complete_job method. + + Ok(()) + } + + async fn post_nack_completion(&self) -> Result<(), TwmqError> { + Ok(()) + } + + fn add_fail_operations( + &self, + job: &BorrowedJob, + error: &H::ErrorData, + pipeline: &mut Pipeline, + ) -> Result<(), TwmqError> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + let job_meta_hash = self.job_meta_hash_name(&job.job.id); + + pipeline.del(&lease_key); + pipeline + .lpush(self.failed_list_name(), &job.job.id) + .hset(&job_meta_hash, "finished_at", now) + .hdel(&job_meta_hash, "lease_token"); + + let error_record = JobErrorRecord { + attempt: job.job.attempts, + error, + details: JobErrorType::fail(), + created_at: now, + }; + let error_json = serde_json::to_string(&error_record)?; + pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); + + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == crate::queue::IdempotencyMode::Active { + pipeline.srem(self.dedupe_set_name(), &job.job.id); + } + + Ok(()) + } + + async fn post_fail_completion(&self) -> Result<(), TwmqError> { + let trim_script = redis::Script::new( + r#" + local queue_id = KEYS[1] + local list_name = KEYS[2] + local job_data_hash = KEYS[3] + local dedupe_set_name = KEYS[4] + + local max_len = tonumber(ARGV[1]) + + local job_ids_to_delete = redis.call('LRANGE', list_name, max_len, -1) + + if #job_ids_to_delete > 0 then + for _, j_id in ipairs(job_ids_to_delete) do + local errors_list_name = 'twmq_multilane:' .. queue_id .. 
':job:' .. j_id .. ':errors' + local job_meta_hash = 'twmq_multilane:' .. queue_id .. ':job:' .. j_id .. ':meta' + + redis.call('SREM', dedupe_set_name, j_id) + redis.call('HDEL', job_data_hash, j_id) + redis.call('DEL', job_meta_hash) + redis.call('DEL', errors_list_name) + end + redis.call('LTRIM', list_name, 0, max_len - 1) + end + return #job_ids_to_delete + "#, + ); + + let trimmed_count: usize = trim_script + .key(self.queue_id()) + .key(self.failed_list_name()) + .key(self.job_data_hash_name()) + .key(self.dedupe_set_name()) + .arg(self.options.max_failed) + .invoke_async(&mut self.redis.clone()) + .await?; + + if trimmed_count > 0 { + tracing::info!("Pruned {} failed jobs", trimmed_count); + } + + Ok(()) + } + + #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id(), queue = self.queue_id()))] + async fn complete_job( + &self, + job: &BorrowedJob, + result: JobResult, + ) -> Result<(), TwmqError> { + // First, we need to get the lane_id and remove from appropriate lane's active hash + let mut conn = self.redis.clone(); + let lane_id: Option = conn + .hget(self.job_meta_hash_name(&job.job.id), "lane_id") + .await?; + + let lane_id = lane_id.ok_or_else(|| TwmqError::Runtime { + message: format!("Job {} missing lane_id in metadata", job.job.id), + })?; + + // Build pipeline with hooks and operations + let mut hook_pipeline = redis::pipe(); + let mut tx_context = + TransactionContext::new(&mut hook_pipeline, self.queue_id().to_string()); + + match &result { + Ok(output) => { + let success_hook_data = SuccessHookData { result: output }; + self.handler + .on_success(job, success_hook_data, &mut tx_context) + .await; + self.add_success_operations(job, output, &mut hook_pipeline)?; + // Remove from lane's active hash + hook_pipeline.hdel(self.lane_active_hash_name(&lane_id), &job.job.id); + } + Err(JobError::Nack { + error, + delay, + position, + }) => { + let nack_hook_data = NackHookData { + error, + delay: *delay, + position: *position, + }; + 
self.handler + .on_nack(job, nack_hook_data, &mut tx_context) + .await; + self.add_nack_operations(job, error, *delay, *position, &mut hook_pipeline)?; + + // Remove from lane's active hash and requeue to appropriate lane queue + hook_pipeline.hdel(self.lane_active_hash_name(&lane_id), &job.job.id); + + if let Some(delay_duration) = delay { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + let delay_until = now + delay_duration.as_secs(); + let pos_str = position.to_string(); + + hook_pipeline + .hset( + self.job_meta_hash_name(&job.job.id), + "reentry_position", + pos_str, + ) + .zadd( + self.lane_delayed_zset_name(&lane_id), + &job.job.id, + delay_until, + ); + } else { + match position { + RequeuePosition::First => { + hook_pipeline.lpush(self.lane_pending_list_name(&lane_id), &job.job.id); + } + RequeuePosition::Last => { + hook_pipeline.rpush(self.lane_pending_list_name(&lane_id), &job.job.id); + } + } + } + } + Err(JobError::Fail(error)) => { + let fail_hook_data = FailHookData { error }; + self.handler + .on_fail(job, fail_hook_data, &mut tx_context) + .await; + self.add_fail_operations(job, error, &mut hook_pipeline)?; + // Remove from lane's active hash + hook_pipeline.hdel(self.lane_active_hash_name(&lane_id), &job.job.id); + } + } + + // Execute with lease protection (same pattern as single-lane queue) + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + + loop { + let mut conn = self.redis.clone(); + + redis::cmd("WATCH") + .arg(&lease_key) + .query_async::<()>(&mut conn) + .await?; + + let lease_exists: bool = conn.exists(&lease_key).await?; + if !lease_exists { + redis::cmd("UNWATCH").query_async::<()>(&mut conn).await?; + tracing::warn!(job_id = %job.job.id, "Lease no longer exists, job was cancelled or timed out"); + return Ok(()); + } + + let mut atomic_pipeline = hook_pipeline.clone(); + atomic_pipeline.atomic(); + + match atomic_pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => 
{ + match &result { + Ok(_) => self.post_success_completion().await?, + Err(JobError::Nack { .. }) => self.post_nack_completion().await?, + Err(JobError::Fail(_)) => self.post_fail_completion().await?, + } + + tracing::debug!(job_id = %job.job.id, lane_id = %lane_id, "Job completion successful"); + return Ok(()); + } + Err(_) => { + tracing::debug!(job_id = %job.job.id, "WATCH failed during completion, retrying"); + continue; + } + } + } + } + + #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id, queue = self.queue_id()))] + async fn complete_job_queue_error( + &self, + job: &Job>, + lease_token: &str, + error: &H::ErrorData, + ) -> Result<(), TwmqError> { + // Get lane_id for proper cleanup + let mut conn = self.redis.clone(); + let lane_id: Option = conn + .hget(self.job_meta_hash_name(&job.id), "lane_id") + .await?; + + let lane_id = lane_id.unwrap_or_else(|| "unknown".to_string()); + + let mut hook_pipeline = redis::pipe(); + let mut tx_context = + TransactionContext::new(&mut hook_pipeline, self.queue_id().to_string()); + + let twmq_error = TwmqError::Runtime { + message: "Job processing failed with user error".to_string(), + }; + let queue_error_hook_data = QueueInternalErrorHookData { error: &twmq_error }; + self.handler + .on_queue_error(job, queue_error_hook_data, &mut tx_context) + .await; + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.id, lease_token); + let job_meta_hash = self.job_meta_hash_name(&job.id); + + hook_pipeline.del(&lease_key); + hook_pipeline + .hdel(self.lane_active_hash_name(&lane_id), &job.id) + .lpush(self.failed_list_name(), &job.id) + .hset(&job_meta_hash, "finished_at", now) + .hdel(&job_meta_hash, "lease_token"); + + let error_record = JobErrorRecord { + attempt: job.attempts, + error, + details: JobErrorType::fail(), + created_at: now, + }; + let error_json = serde_json::to_string(&error_record)?; + 
hook_pipeline.lpush(self.job_errors_list_name(&job.id), error_json); + + // For "active" idempotency mode, remove from deduplication set immediately + if self.options.idempotency_mode == crate::queue::IdempotencyMode::Active { + hook_pipeline.srem(self.dedupe_set_name(), &job.id); + } + + // Execute with lease protection + loop { + let mut conn = self.redis.clone(); + + redis::cmd("WATCH") + .arg(&lease_key) + .query_async::<()>(&mut conn) + .await?; + + let lease_exists: bool = conn.exists(&lease_key).await?; + if !lease_exists { + redis::cmd("UNWATCH").query_async::<()>(&mut conn).await?; + tracing::warn!(job_id = %job.id, "Lease no longer exists, job was cancelled or timed out"); + return Ok(()); + } + + let mut atomic_pipeline = hook_pipeline.clone(); + atomic_pipeline.atomic(); + + match atomic_pipeline + .query_async::>(&mut conn) + .await + { + Ok(_) => { + self.post_fail_completion().await?; + tracing::debug!(job_id = %job.id, lane_id = %lane_id, "Queue error job completion successful"); + return Ok(()); + } + Err(_) => { + tracing::debug!(job_id = %job.id, "WATCH failed during queue error completion, retrying"); + continue; + } + } + } + } +} + +impl MultilanePushableJob { + pub fn delay(mut self, delay: Duration) -> Self { + self.options.delay = Some(DelayOptions { + delay, + position: RequeuePosition::Last, + }); + self + } + + pub fn delay_with_position(mut self, delay: Duration, position: RequeuePosition) -> Self { + self.options.delay = Some(DelayOptions { delay, position }); + self + } + + pub fn id(mut self, id: String) -> Self { + self.options.id = id; + self + } + + pub async fn push(self) -> Result, TwmqError> { + self.queue.push_to_lane(&self.lane_id, self.options).await + } +} diff --git a/twmq/src/queue.rs b/twmq/src/queue.rs index 0418576..819a3d6 100644 --- a/twmq/src/queue.rs +++ b/twmq/src/queue.rs @@ -1,9 +1,27 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; use redis::{Client, aio::ConnectionManager}; +use 
serde::{Serialize, Deserialize}; use crate::{DurableExecution, Queue, error::TwmqError}; +/// Defines how job idempotency is handled +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub enum IdempotencyMode { + /// Jobs stay in the deduplication set until pruned (existing behavior) + /// This prevents duplicate jobs from being added even after completion + Permanent, + /// Jobs are removed from the deduplication set immediately upon completion + /// This only prevents duplicates of pending/delayed/active jobs + Active, +} + +impl Default for IdempotencyMode { + fn default() -> Self { + Self::Permanent + } +} + #[derive(Clone, Debug)] pub struct QueueOptions { pub max_success: usize, @@ -18,6 +36,9 @@ pub struct QueueOptions { /// If you have a horiztonally scaled deployment, this can be set to the default of false /// But if there's only one node, you can set this to true to avoid the local concurrency from blocking queue housekeeping pub always_poll: bool, + + /// Controls how job idempotency is handled + pub idempotency_mode: IdempotencyMode, } impl Default for QueueOptions { @@ -29,6 +50,7 @@ impl Default for QueueOptions { polling_interval: Duration::from_millis(100), lease_duration: Duration::from_secs(30), always_poll: false, + idempotency_mode: IdempotencyMode::default(), } } } diff --git a/twmq/src/shutdown.rs b/twmq/src/shutdown.rs index 7046552..04341c3 100644 --- a/twmq/src/shutdown.rs +++ b/twmq/src/shutdown.rs @@ -2,25 +2,41 @@ use crate::error::TwmqError; use std::sync::Arc; /// Handle for a single worker that can be shut down gracefully -pub struct WorkerHandle { +pub struct WorkerHandle { pub join_handle: tokio::task::JoinHandle>, pub shutdown_tx: tokio::sync::oneshot::Sender<()>, - pub queue: Arc>, + pub queue: Arc, } -impl WorkerHandle { +pub trait QueueIdentifier { + fn queue_name(&self) -> &str; +} + +impl QueueIdentifier for crate::Queue { + fn queue_name(&self) -> &str { + self.name() + } +} + +impl QueueIdentifier for 
crate::MultilaneQueue { + fn queue_name(&self) -> &str { + self.queue_id() + } +} + +impl WorkerHandle { /// Shutdown this worker gracefully pub async fn shutdown(self) -> Result<(), TwmqError> { tracing::info!( "Initiating graceful shutdown of worker for queue: {}", - self.queue.name() + self.queue.queue_name() ); // Signal shutdown to the worker if self.shutdown_tx.send(()).is_err() { tracing::warn!( "Worker for queue {} was already shutting down", - self.queue.name() + self.queue.queue_name() ); } @@ -29,14 +45,14 @@ impl WorkerHandle { Ok(Ok(())) => { tracing::info!( "Worker for queue {} shut down gracefully", - self.queue.name() + self.queue.queue_name() ); Ok(()) } Ok(Err(e)) => { tracing::error!( "Worker for queue {} shut down with error: {:?}", - self.queue.name(), + self.queue.queue_name(), e ); Err(e) @@ -44,7 +60,7 @@ impl WorkerHandle { Err(e) => { tracing::error!( "Worker task for queue {} panicked during shutdown: {:?}", - self.queue.name(), + self.queue.queue_name(), e ); Err(TwmqError::Runtime { message: format!("Worker panic: {}", e) }) @@ -69,7 +85,7 @@ impl ShutdownHandle { } /// Add a worker to be managed by this shutdown handle - pub fn add_worker(&mut self, worker: WorkerHandle) { + pub fn add_worker(&mut self, worker: WorkerHandle) { self.join_handles.push(worker.join_handle); self.shutdown_txs.push(worker.shutdown_tx); } @@ -133,16 +149,16 @@ impl Default for ShutdownHandle { // Convenience methods to make collecting workers easier impl ShutdownHandle { /// Create a new shutdown handle with a single worker - pub fn with_worker(worker: WorkerHandle) -> Self { + pub fn with_worker(worker: WorkerHandle) -> Self { let mut handle = Self::new(); handle.add_worker(worker); handle } /// Add multiple workers at once - pub fn add_workers( + pub fn add_workers( &mut self, - workers: impl IntoIterator>, + workers: impl IntoIterator>, ) { for worker in workers { self.add_worker(worker); @@ -150,7 +166,7 @@ impl ShutdownHandle { } /// Builder-style method 
to add a worker - pub fn and_worker(mut self, worker: WorkerHandle) -> Self { + pub fn and_worker(mut self, worker: WorkerHandle) -> Self { self.add_worker(worker); self } diff --git a/twmq/tests/idempotency_modes.rs b/twmq/tests/idempotency_modes.rs new file mode 100644 index 0000000..663920b --- /dev/null +++ b/twmq/tests/idempotency_modes.rs @@ -0,0 +1,255 @@ +use serde::{Deserialize, Serialize}; +use std::sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, +}; +use std::time::Duration; +use twmq::{ + DurableExecution, Queue, + job::{BorrowedJob, JobResult, JobStatus}, + queue::{IdempotencyMode, QueueOptions}, + redis::aio::ConnectionManager, +}; + +const REDIS_URL: &str = "redis://127.0.0.1:6379/"; + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestJobData { + message: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestJobOutput { + processed: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestJobError { + error_message: String, +} + +impl From for TestJobError { + fn from(err: twmq::error::TwmqError) -> Self { + TestJobError { + error_message: err.to_string(), + } + } +} + +impl twmq::UserCancellable for TestJobError { + fn user_cancelled() -> Self { + TestJobError { + error_message: "User cancelled".to_string(), + } + } +} + +struct TestJobHandler { + processed_count: Arc, +} + +impl DurableExecution for TestJobHandler { + type Output = TestJobOutput; + type ErrorData = TestJobError; + type JobData = TestJobData; + + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + self.processed_count.fetch_add(1, Ordering::SeqCst); + + Ok(TestJobOutput { + processed: format!("Processed: {}", job.data().message), + }) + } +} + +// Helper to clean up Redis keys +async fn cleanup_redis_keys(conn_manager: &ConnectionManager, queue_name: &str) { + let mut conn = conn_manager.clone(); + let keys_pattern = format!("twmq:{}:*", queue_name); + + let keys: Vec = redis::cmd("KEYS") + .arg(&keys_pattern) + 
.query_async(&mut conn) + .await + .unwrap_or_default(); + if !keys.is_empty() { + redis::cmd("DEL") + .arg(keys) + .query_async::<()>(&mut conn) + .await + .unwrap_or_default(); + } +} + +#[tokio::test] +async fn test_permanent_idempotency_mode() { + let queue_name = format!("test_perm_{}", nanoid::nanoid!(6)); + let processed_count = Arc::new(AtomicUsize::new(0)); + + let mut queue_options = QueueOptions::default(); + queue_options.idempotency_mode = IdempotencyMode::Permanent; + queue_options.local_concurrency = 1; + + let handler = TestJobHandler { + processed_count: processed_count.clone(), + }; + + let queue = Arc::new( + Queue::new(REDIS_URL, &queue_name, Some(queue_options), handler) + .await + .expect("Failed to create queue"), + ); + + cleanup_redis_keys(&queue.redis, &queue_name).await; + + let job_data = TestJobData { + message: "test message".to_string(), + }; + + // Push the same job twice with the same ID + let job_id = "test_job_permanent"; + + let _job1 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + let _job2 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + + // Only one job should be in pending (deduplication should prevent the second) + let pending_count = queue.count(JobStatus::Pending).await.unwrap(); + assert_eq!( + pending_count, 1, + "Only one job should be pending due to deduplication" + ); + + // Start worker and let it process + let worker = queue.work(); + + // Wait for processing + tokio::time::sleep(Duration::from_millis(500)).await; + + // Should have processed exactly one job + assert_eq!( + processed_count.load(Ordering::SeqCst), + 1, + "Should have processed exactly one job" + ); + + // Try to add the same job again - should still be blocked by permanent idempotency + let _job3 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + + tokio::time::sleep(Duration::from_millis(500)).await; + + // 
Should still be only one processed job + assert_eq!( + processed_count.load(Ordering::SeqCst), + 1, + "Should still have processed only one job" + ); + + worker.shutdown().await.unwrap(); + cleanup_redis_keys(&queue.redis, &queue_name).await; +} + +#[tokio::test] +async fn test_active_idempotency_mode() { + let queue_name = format!("test_active_{}", nanoid::nanoid!(6)); + let processed_count = Arc::new(AtomicUsize::new(0)); + + let mut queue_options = QueueOptions::default(); + queue_options.idempotency_mode = IdempotencyMode::Active; + queue_options.local_concurrency = 1; + + let handler = TestJobHandler { + processed_count: processed_count.clone(), + }; + + let queue = Arc::new( + Queue::new(REDIS_URL, &queue_name, Some(queue_options), handler) + .await + .expect("Failed to create queue"), + ); + + cleanup_redis_keys(&queue.redis, &queue_name).await; + + let job_data = TestJobData { + message: "test message".to_string(), + }; + + // Push the same job twice with the same ID + let job_id = "test_job_active"; + + let _job1 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + let _job2 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + + // Only one job should be in pending (deduplication should prevent the second) + let pending_count = queue.count(JobStatus::Pending).await.unwrap(); + assert_eq!( + pending_count, 1, + "Only one job should be pending due to deduplication" + ); + + // Start worker and let it process + let worker = queue.work(); + + // Wait for processing + tokio::time::sleep(Duration::from_millis(500)).await; + + // Should have processed exactly one job + assert_eq!( + processed_count.load(Ordering::SeqCst), + 1, + "Should have processed exactly one job" + ); + + // Try to add the same job again - should be allowed with active idempotency + let _job3 = queue + .clone() + .job(job_data.clone()) + .with_id(job_id) + .push() + .await + .unwrap(); + + 
tokio::time::sleep(Duration::from_millis(500)).await; + + // Should have processed two jobs now + assert_eq!( + processed_count.load(Ordering::SeqCst), + 2, + "Should have processed two jobs with active idempotency" + ); + + worker.shutdown().await.unwrap(); + cleanup_redis_keys(&queue.redis, &queue_name).await; +} diff --git a/twmq/tests/lease_expiry.rs b/twmq/tests/lease_expiry.rs index b2ece8a..4c11378 100644 --- a/twmq/tests/lease_expiry.rs +++ b/twmq/tests/lease_expiry.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}; use twmq::{ - DurableExecution, FailHookData, NackHookData, Queue, SuccessHookData, + DurableExecution, FailHookData, IdempotencyMode, NackHookData, Queue, SuccessHookData, hooks::TransactionContext, job::{BorrowedJob, JobResult, JobStatus}, queue::QueueOptions, @@ -65,7 +65,10 @@ impl DurableExecution for SleepForeverHandler { type ErrorData = TestJobErrorData; type JobData = SleepForeverJobData; - async fn process(&self, job: &BorrowedJob) -> JobResult { + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { tracing::info!( "SLEEP_JOB: Starting to process job {}, attempt {}", job.job.id, @@ -153,6 +156,7 @@ async fn test_job_lease_expiry() { polling_interval: Duration::from_millis(100), local_concurrency: 1, always_poll: true, + idempotency_mode: IdempotencyMode::Active, }; let handler = SleepForeverHandler { @@ -301,6 +305,7 @@ async fn test_multiple_job_lease_expiry() { lease_duration, polling_interval: Duration::from_millis(100), always_poll: true, + idempotency_mode: IdempotencyMode::Active, }; let queue = Arc::new( diff --git a/twmq/tests/multilane_batch_pop.rs b/twmq/tests/multilane_batch_pop.rs new file mode 100644 index 0000000..bc9e111 --- /dev/null +++ b/twmq/tests/multilane_batch_pop.rs @@ -0,0 +1,492 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; + +use serde::{Deserialize, Serialize}; +use 
tokio::time::timeout; + +use twmq::error::TwmqError; +use twmq::job::{BorrowedJob, JobResult}; +use twmq::{DurableExecution, MultilaneQueue, UserCancellable}; + +const REDIS_URL: &str = "redis://127.0.0.1:6379/"; + +// Simple test job that just holds an ID +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestJob { + id: u32, + data: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestOutput { + processed_id: u32, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct TestError { + message: String, +} + +impl From for TestError { + fn from(error: TwmqError) -> Self { + TestError { + message: error.to_string(), + } + } +} + +impl UserCancellable for TestError { + fn user_cancelled() -> Self { + TestError { + message: "Cancelled".to_string(), + } + } +} + +// Dummy handler - these tests focus on batch pop logic, not processing +struct DummyHandler; + +impl DurableExecution for DummyHandler { + type Output = TestOutput; + type ErrorData = TestError; + type JobData = TestJob; + + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + Ok(TestOutput { + processed_id: job.job.data.id, + }) + } +} + +/// Test harness for multilane queue batch operations +struct MultilaneTestHarness { + pub queue: Arc>, + pub queue_id: String, +} + +impl MultilaneTestHarness { + async fn new() -> Self { + let queue_id = format!("test_multilane_{}", nanoid::nanoid!(8)); + let handler = DummyHandler; + + let queue = Arc::new( + MultilaneQueue::new(REDIS_URL, &queue_id, None, handler) + .await + .expect("Failed to create multilane queue"), + ); + + // warm up redis connection + let _ = queue.count(twmq::job::JobStatus::Active, None).await; + + let harness = Self { queue, queue_id }; + harness.cleanup().await; + harness + } + + /// Clean up all Redis keys for this test + async fn cleanup(&self) { + let mut conn = self.queue.redis.clone(); + let keys_pattern = format!("twmq_multilane:{}:*", self.queue_id); + + let keys: Vec = redis::cmd("KEYS") + 
.arg(&keys_pattern) + .query_async(&mut conn) + .await + .unwrap_or_default(); + + if !keys.is_empty() { + redis::cmd("DEL") + .arg(keys) + .query_async::<()>(&mut conn) + .await + .unwrap_or_default(); + } + } + + /// Add jobs to specific lanes + async fn add_jobs_to_lanes(&self, jobs_per_lane: &HashMap>) { + for (lane_id, jobs) in jobs_per_lane { + for job in jobs { + self.queue + .clone() + .job_for_lane(lane_id, job.clone()) + .id(format!("job_{}_{}", lane_id, job.id)) + .push() + .await + .expect("Failed to push job"); + } + } + } + + /// Batch pop jobs and return the results grouped by lane + async fn batch_pop(&self, batch_size: usize) -> HashMap> { + let jobs = self + .queue + .pop_batch_jobs(batch_size) + .await + .expect("Failed to pop batch jobs"); + + let mut results = HashMap::new(); + for (lane_id, job) in jobs { + results + .entry(lane_id) + .or_insert_with(Vec::new) + .push(job.job.data.id); + } + results + } + + /// Count total jobs across all lanes by status + async fn count_total_jobs(&self, status: twmq::job::JobStatus) -> usize { + self.queue + .count(status, None) + .await + .expect("Failed to count jobs") + } + + /// Count jobs in specific lane by status + async fn count_lane_jobs(&self, lane_id: &str, status: twmq::job::JobStatus) -> usize { + self.queue + .count(status, Some(lane_id)) + .await + .expect("Failed to count lane jobs") + } +} + +impl Drop for MultilaneTestHarness { + fn drop(&mut self) { + // Cleanup in background since we can't await in drop + let queue_id = self.queue_id.clone(); + let redis = self.queue.clone().redis.clone(); + + tokio::spawn(async move { + let mut conn = redis; + let keys_pattern = format!("twmq_multilane:{}:*", queue_id); + let keys: Vec = redis::cmd("KEYS") + .arg(&keys_pattern) + .query_async(&mut conn) + .await + .unwrap_or_default(); + + if !keys.is_empty() { + redis::cmd("DEL") + .arg(keys) + .query_async::<()>(&mut conn) + .await + .unwrap_or_default(); + } + }); + } +} + +#[tokio::test(flavor = 
"multi_thread")] +async fn multilane_test_batch_pop_single_lane_with_100k_empty_lanes() { + // Test: 100,000 lanes, only 1 has 100 jobs, batch pop 100 + println!("🧪 Testing batch pop with 100,000 lanes, 1 active lane with 100 jobs"); + + let harness = MultilaneTestHarness::new().await; + + // Create jobs for the single active lane + let active_lane = "lane_active".to_string(); + let mut jobs_per_lane = HashMap::new(); + + let mut jobs = Vec::new(); + for i in 0..100 { + jobs.push(TestJob { + id: i, + data: format!("job_{}", i), + }); + } + jobs_per_lane.insert(active_lane.clone(), jobs); + + // Add 99,999 empty lanes by creating them in Redis lanes zset + // We do this by adding empty lanes to the zset directly + let mut conn = harness.queue.redis.clone(); + for i in 0..99_999 { + let lane_id = format!("empty_lane_{}", i); + // Add lane to lanes zset with score 0 (never processed) + redis::cmd("ZADD") + .arg(harness.queue.lanes_zset_name()) + .arg("NX") // Only add if not exists + .arg(0) + .arg(&lane_id) + .query_async::<()>(&mut conn) + .await + .expect("Failed to add empty lane"); + } + + // Add the actual jobs + harness.add_jobs_to_lanes(&jobs_per_lane).await; + + // Verify setup + let pending_count = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(pending_count, 100, "Should have 100 pending jobs"); + + let active_lane_count = harness + .count_lane_jobs(&active_lane, twmq::job::JobStatus::Pending) + .await; + assert_eq!( + active_lane_count, 100, + "Active lane should have 100 pending jobs" + ); + + // Test batch pop with timeout to ensure it doesn't hang + println!("⏱️ Executing batch pop (should complete quickly despite 100k lanes)..."); + let start = std::time::Instant::now(); + + let result = timeout(Duration::from_secs(10), harness.batch_pop(100)) + .await + .expect("Batch pop should complete within 10 seconds"); + + let duration = start.elapsed(); + println!("✅ Batch pop completed in {:?}", duration); + + // Verify 
results + assert_eq!(result.len(), 1, "Should get jobs from exactly 1 lane"); + assert!( + result.contains_key(&active_lane), + "Should get jobs from active lane" + ); + + let jobs_from_active = &result[&active_lane]; + assert_eq!(jobs_from_active.len(), 100, "Should get all 100 jobs"); + + // Verify all job IDs are present + let mut expected_ids: Vec = (0..100).collect(); + let mut actual_ids = jobs_from_active.clone(); + expected_ids.sort(); + actual_ids.sort(); + assert_eq!(actual_ids, expected_ids, "Should get all expected job IDs"); + + // Verify no jobs left pending + let remaining_pending = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(remaining_pending, 0, "Should have no pending jobs left"); + + // Performance assertion - should complete in reasonable time even with 100k lanes + assert!( + duration < Duration::from_secs(5), + "Should complete within 5 seconds even with 100k lanes" + ); + + println!("✅ Test passed: Single lane with 100k empty lanes"); +} + +#[tokio::test(flavor = "multi_thread")] +async fn multilane_test_batch_pop_distributed_jobs_across_100k_lanes() { + // Test: 200 jobs distributed randomly across 100,000 lanes, batch pop 100 three times + println!("🧪 Testing batch pop with 200 jobs distributed across 100,000 lanes"); + + let harness = MultilaneTestHarness::new().await; + + // Create 200 jobs distributed across 200 different lanes (1 job per lane) + let mut jobs_per_lane = HashMap::new(); + for i in 0..200 { + let lane_id = format!("lane_{}", i); + let job = TestJob { + id: i, + data: format!("job_{}", i), + }; + jobs_per_lane.insert(lane_id, vec![job]); + } + + // Add 99,800 empty lanes to reach 100,000 total + let mut conn = harness.queue.redis.clone(); + for i in 200..100_000 { + let lane_id = format!("empty_lane_{}", i); + redis::cmd("ZADD") + .arg(harness.queue.lanes_zset_name()) + .arg("NX") + .arg(0) + .arg(&lane_id) + .query_async::<()>(&mut conn) + .await + .expect("Failed to add empty lane"); 
+ } + + // Add the jobs + harness.add_jobs_to_lanes(&jobs_per_lane).await; + + // Verify setup + let pending_count = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(pending_count, 200, "Should have 200 pending jobs"); + + // First batch pop - should get 100 jobs + println!("[200 jobs - 200/100k lanes] ⏱️ First batch pop (100 jobs)..."); + let start = std::time::Instant::now(); + let result1 = timeout(Duration::from_secs(10), harness.batch_pop(100)) + .await + .expect("First batch pop should complete within 10 seconds"); + let duration1 = start.elapsed(); + println!( + "[200 jobs - 200/100k lanes] ✅ First batch pop completed in {:?}", + duration1 + ); + + let new_lanes_count = harness.queue.lanes_count().await.unwrap(); + println!( + "[200 jobs - 200/100k lanes] New lanes count after initial batch pop: {}", + new_lanes_count + ); + + let total_jobs_1: usize = result1.values().map(|jobs| jobs.len()).sum(); + assert_eq!(total_jobs_1, 100, "First batch should return 100 jobs"); + + // Verify remaining pending jobs + let remaining_after_1 = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!( + remaining_after_1, 100, + "Should have 100 pending jobs after first batch" + ); + + // Second batch pop - should get 100 jobs + println!("[200 jobs - 200/100k lanes] ⏱️ Second batch pop (100 jobs)..."); + let start = std::time::Instant::now(); + let result2 = timeout(Duration::from_secs(10), harness.batch_pop(100)) + .await + .expect("Second batch pop should complete within 10 seconds"); + let duration2 = start.elapsed(); + println!( + "[200 jobs - 200/100k lanes] ✅ Second batch pop completed in {:?}", + duration2 + ); + + let total_jobs_2: usize = result2.values().map(|jobs| jobs.len()).sum(); + assert_eq!(total_jobs_2, 100, "Second batch should return 100 jobs"); + + // Verify no remaining pending jobs + let remaining_after_2 = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!( + 
remaining_after_2, 0, + "Should have 0 pending jobs after second batch" + ); + + // Third batch pop - should get 0 jobs + println!("⏱️ Third batch pop (should get 0 jobs)..."); + let start = std::time::Instant::now(); + let result3 = timeout(Duration::from_secs(10), harness.batch_pop(100)) + .await + .expect("Third batch pop should complete within 10 seconds"); + let duration3 = start.elapsed(); + println!("✅ Third batch pop completed in {:?}", duration3); + + let total_jobs_3: usize = result3.values().map(|jobs| jobs.len()).sum(); + assert_eq!(total_jobs_3, 0, "Third batch should return 0 jobs"); + + // Verify all unique job IDs were returned across both batches + let mut all_job_ids: Vec = Vec::new(); + for jobs in result1.values() { + all_job_ids.extend(jobs); + } + for jobs in result2.values() { + all_job_ids.extend(jobs); + } + + all_job_ids.sort(); + let expected_ids: Vec = (0..200).collect(); + assert_eq!( + all_job_ids, expected_ids, + "Should get all 200 unique job IDs across two batches" + ); + + // Performance assertions + assert!( + duration1 < Duration::from_secs(5), + "First batch should complete quickly" + ); + assert!( + duration2 < Duration::from_secs(5), + "Second batch should complete quickly" + ); + assert!( + duration3 < Duration::from_secs(2), + "Third batch should complete very quickly (no jobs)" + ); + + println!("✅ Test passed: Distributed jobs across 100k lanes"); +} + +#[tokio::test(flavor = "multi_thread")] +async fn multilane_test_batch_pop_fairness_across_lanes() { + // Test fairness: ensure round-robin behavior across multiple lanes with jobs + println!("🧪 Testing batch pop fairness across multiple active lanes"); + + let harness = MultilaneTestHarness::new().await; + + // Create 10 lanes, each with 10 jobs (100 total) + let mut jobs_per_lane = HashMap::new(); + for lane_num in 0..10 { + let lane_id = format!("lane_{}", lane_num); + let mut jobs = Vec::new(); + for job_num in 0..10 { + jobs.push(TestJob { + id: lane_num * 10 + 
job_num, + data: format!("job_{}_{}", lane_num, job_num), + }); + } + jobs_per_lane.insert(lane_id, jobs); + } + + harness.add_jobs_to_lanes(&jobs_per_lane).await; + + // Batch pop 10 jobs - should get 1 from each lane (fairness) + let result = harness.batch_pop(10).await; + + assert_eq!(result.len(), 10, "Should get jobs from all 10 lanes"); + + for lane_num in 0..10 { + let lane_id = format!("lane_{}", lane_num); + assert!( + result.contains_key(&lane_id), + "Should have job from lane {}", + lane_num + ); + assert_eq!( + result[&lane_id].len(), + 1, + "Should get exactly 1 job from lane {}", + lane_num + ); + } + + // Verify remaining jobs + let remaining = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(remaining, 90, "Should have 90 jobs remaining"); + + println!("✅ Test passed: Fairness across multiple lanes"); +} + +#[tokio::test(flavor = "multi_thread")] +async fn multilane_test_batch_pop_empty_queue() { + // Edge case: batch pop from completely empty queue + println!("🧪 Testing batch pop from empty queue"); + + let harness = MultilaneTestHarness::new().await; + + // Don't add any jobs + let result = harness.batch_pop(100).await; + + assert_eq!(result.len(), 0, "Should get no jobs from empty queue"); + + let pending = harness + .count_total_jobs(twmq::job::JobStatus::Pending) + .await; + assert_eq!(pending, 0, "Should have no pending jobs"); + + println!("✅ Test passed: Empty queue handling"); +}