// Copyright (c) The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <bench/bench.h>
#include <consensus/amount.h>
#include <consensus/consensus.h>
#include <net.h>
#include <node/txorphanage.h>
#include <policy/policy.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <pubkey.h>
#include <random.h>
#include <script/sign.h>
#include <test/util/setup_common.h>
#include <test/util/transaction_utils.h>
#include <util/check.h>

#include <cassert>
#include <cstdint>
#include <memory>
#include <numeric>
#include <vector>

static constexpr node::TxOrphanage::Usage TINY_TX_WEIGHT{240};
static constexpr int64_t APPROX_WEIGHT_PER_INPUT{200};
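// Rough serialization estimate behind APPROX_WEIGHT_PER_INPUT (not taken from the orphanage code itself):
// a bare CTxIn (36-byte outpoint, empty scriptSig, 4-byte nSequence) serializes to ~41 bytes of non-witness
// data, i.e. ~164 WU, so 200 WU per input is a deliberately generous round number.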

// Creates a transaction with num_inputs inputs and 1 output, padded to target_weight. Use this function to maximize m_outpoint_to_orphan_it operations.
// If num_inputs is 0, we maximize the number of inputs.
static CTransactionRef MakeTransactionBulkedTo(unsigned int num_inputs, int64_t target_weight, FastRandomContext& det_rand)
{
    CMutableTransaction tx;
    assert(target_weight >= 40 + APPROX_WEIGHT_PER_INPUT);
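    // Assumed arithmetic for the estimate below: a transaction with no inputs or outputs serializes to
    // ~10 bytes (version, input/output counts, locktime), i.e. ~40 WU, and each bare input costs roughly
    // APPROX_WEIGHT_PER_INPUT, so about (target_weight - 40) / APPROX_WEIGHT_PER_INPUT inputs should fit.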
    if (!num_inputs) num_inputs = (target_weight - 40) / APPROX_WEIGHT_PER_INPUT;
    for (unsigned int i = 0; i < num_inputs; ++i) {
        tx.vin.emplace_back(Txid::FromUint256(det_rand.rand256()), 0);
    }
    assert(GetTransactionWeight(*MakeTransactionRef(tx)) <= target_weight);

    tx.vout.resize(1);

    // If necessary, pad the transaction to the target weight.
    if (GetTransactionWeight(*MakeTransactionRef(tx)) < target_weight - 4) {
        BulkTransaction(tx, target_weight);
    }
    return MakeTransactionRef(tx);
}

// Constructs a transaction using a subset of inputs[start_input : start_input + num_inputs] up to the weight_limit.
static CTransactionRef MakeTransactionSpendingUpTo(const std::vector<CTxIn>& inputs, unsigned int start_input, unsigned int num_inputs, int64_t weight_limit)
{
    CMutableTransaction tx;
    for (unsigned int i{start_input}; i < start_input + num_inputs; ++i) {
        if (GetTransactionWeight(*MakeTransactionRef(tx)) + APPROX_WEIGHT_PER_INPUT >= weight_limit) break;
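        // Indices wrap modulo inputs.size(), so the requested window may run past the end of the vector.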
        tx.vin.emplace_back(inputs.at(i % inputs.size()));
    }
    assert(tx.vin.size() > 0);
    return MakeTransactionRef(tx);
}

static void OrphanageSinglePeerEviction(benchmark::Bench& bench)
{
    FastRandomContext det_rand{true};

    // Fill up announcement slots with tiny txns, followed by a single large one.
    const unsigned int NUM_TINY_TRANSACTIONS(node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE);

    // Construct transactions to submit to orphanage: 1-in-1-out tiny transactions
    std::vector<CTransactionRef> tiny_txs;
    tiny_txs.reserve(NUM_TINY_TRANSACTIONS);
    for (unsigned int i{0}; i < NUM_TINY_TRANSACTIONS; ++i) {
        tiny_txs.emplace_back(MakeTransactionBulkedTo(1, TINY_TX_WEIGHT, det_rand));
    }
    auto large_tx = MakeTransactionBulkedTo(1, MAX_STANDARD_TX_WEIGHT, det_rand);
    assert(GetTransactionWeight(*large_tx) <= MAX_STANDARD_TX_WEIGHT);

    const auto orphanage{node::MakeTxOrphanage(/*max_global_ann=*/node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE, /*reserved_peer_usage=*/node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER)};

    // Populate the orphanage. To maximize the number of evictions, first fill up with tiny transactions, then add a huge one.
    NodeId peer{0};
    // Add tiny transactions until we are just about to hit the memory limit, up to the max number of announcements.
    // We use the same tiny transactions for all peers to minimize their contribution to the usage limit.
    int64_t total_weight_to_add{0};
    for (unsigned int txindex{0}; txindex < NUM_TINY_TRANSACTIONS; ++txindex) {
        const auto& tx{tiny_txs.at(txindex)};

        total_weight_to_add += GetTransactionWeight(*tx);
        if (total_weight_to_add > orphanage->MaxGlobalUsage()) break;

        assert(orphanage->AddTx(tx, peer));

        // Sanity check: we should always be exiting at the point of hitting the weight limit.
        assert(txindex < NUM_TINY_TRANSACTIONS - 1);
    }

    // In the real world, we always trim after each new tx.
    // If we need to trim already, that means the benchmark is not representative of what LimitOrphans may do in a single call.
    assert(orphanage->TotalOrphanUsage() <= orphanage->MaxGlobalUsage());
    assert(orphanage->TotalLatencyScore() <= orphanage->MaxGlobalLatencyScore());
    assert(orphanage->TotalOrphanUsage() + TINY_TX_WEIGHT > orphanage->MaxGlobalUsage());

    bench.epochs(1).epochIterations(1).run([&]() NO_THREAD_SAFETY_ANALYSIS {
        // Lastly, add the large transaction.
        const auto num_announcements_before_trim{orphanage->CountAnnouncements()};
        assert(orphanage->AddTx(large_tx, peer));
        orphanage->LimitOrphans();

        // If there are multiple peers, note that they all have the same DoS score. We will evict only 1 item at a time for each new DoSiest peer.
        const auto num_announcements_after_trim{orphanage->CountAnnouncements()};
        const auto num_evicted{num_announcements_before_trim - num_announcements_after_trim};

        // The number of evictions is the same regardless of the number of peers. In both cases, we can exceed the
        // usage limit using 1 maximally-sized transaction.
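        // With the defaults at the time of writing (MAX_STANDARD_TX_WEIGHT of 400'000 WU), that works out to
        // 400'000 / 240 = 1'666 tiny transactions evicted to make room for the large one.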
        assert(num_evicted == MAX_STANDARD_TX_WEIGHT / TINY_TX_WEIGHT);
    });
}

static void OrphanageMultiPeerEviction(benchmark::Bench& bench)
{
    // Best number is just below sqrt(DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE)
    static constexpr unsigned int NUM_PEERS{39};
    // All peers will have the same transactions. We want to be just under the weight limit, so divide the max usage limit by the number of unique transactions.
    static constexpr node::TxOrphanage::Count NUM_UNIQUE_TXNS{node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE / NUM_PEERS};
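    // With the default latency score limit at the time of writing (3'000 announcements), this is
    // 3'000 / 39 = 76 unique transactions shared by all peers.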
    static constexpr node::TxOrphanage::Usage TOTAL_USAGE_LIMIT{node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER * NUM_PEERS};
    // Subtract 4 because BulkTransaction rounds up and we must avoid going over the weight limit early.
    static constexpr node::TxOrphanage::Usage LARGE_TX_WEIGHT{TOTAL_USAGE_LIMIT / NUM_UNIQUE_TXNS - 4};
    static_assert(LARGE_TX_WEIGHT >= TINY_TX_WEIGHT * 2, "Tx is too small, increase NUM_PEERS");
    // The orphanage does not permit any transactions larger than 400'000, so this test will not work if the large tx is much larger.
    static_assert(LARGE_TX_WEIGHT <= MAX_STANDARD_TX_WEIGHT, "Tx is too large, decrease NUM_PEERS");

    FastRandomContext det_rand{true};
    // Construct large transactions
    std::vector<CTransactionRef> shared_txs;
    shared_txs.reserve(NUM_UNIQUE_TXNS);
    for (unsigned int i{0}; i < NUM_UNIQUE_TXNS; ++i) {
        shared_txs.emplace_back(MakeTransactionBulkedTo(9, LARGE_TX_WEIGHT, det_rand));
    }
    std::vector<size_t> indexes;
    indexes.resize(NUM_UNIQUE_TXNS);
    std::iota(indexes.begin(), indexes.end(), 0);

    const auto orphanage{node::MakeTxOrphanage(/*max_global_ann=*/node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE, /*reserved_peer_usage=*/node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER)};
    // Every peer sends the same transactions, all from shared_txs.
    // Each peer has 1 or 2 assigned transactions, which it must announce in the last and second-to-last positions.
    // The assignments ensure that every transaction is in some peer's last 2 transactions, and thus remains in the orphanage until the end of LimitOrphans.
    static_assert(NUM_UNIQUE_TXNS <= NUM_PEERS * 2);
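    // Concretely: peer p's newest announcement is shared_txs[p]; peers with p + NUM_PEERS < NUM_UNIQUE_TXNS
    // additionally hold back shared_txs[p + NUM_PEERS] as their second-newest, so every unique transaction
    // ends up among somebody's two newest announcements.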

    // We need each peer to send some transactions so that the global limit (which is a function of the number of peers providing at least 1 announcement) rises.
    for (unsigned int i{0}; i < NUM_UNIQUE_TXNS; ++i) {
        for (NodeId peer{0}; peer < NUM_PEERS; ++peer) {
            const CTransactionRef& reserved_last_tx{shared_txs.at(peer)};
            CTransactionRef reserved_second_to_last_tx{peer < NUM_UNIQUE_TXNS - NUM_PEERS ? shared_txs.at(peer + NUM_PEERS) : nullptr};

            const auto& tx{shared_txs.at(indexes.at(i))};
            if (tx == reserved_last_tx) {
                // Skip
            } else if (reserved_second_to_last_tx && tx == reserved_second_to_last_tx) {
                // Skip
            } else {
                orphanage->AddTx(tx, peer);
            }
        }
    }

    // Now add the final reserved transactions.
    for (NodeId peer{0}; peer < NUM_PEERS; ++peer) {
        const CTransactionRef& reserved_last_tx{shared_txs.at(peer)};
        CTransactionRef reserved_second_to_last_tx{peer < NUM_UNIQUE_TXNS - NUM_PEERS ? shared_txs.at(peer + NUM_PEERS) : nullptr};
        // Add the final reserved transactions.
        if (reserved_second_to_last_tx) {
            orphanage->AddTx(reserved_second_to_last_tx, peer);
        }
        orphanage->AddTx(reserved_last_tx, peer);
    }

    assert(orphanage->CountAnnouncements() == NUM_PEERS * NUM_UNIQUE_TXNS);
    const auto total_usage{orphanage->TotalOrphanUsage()};
    const auto max_usage{orphanage->MaxGlobalUsage()};
    assert(max_usage - total_usage <= LARGE_TX_WEIGHT);
    assert(orphanage->TotalLatencyScore() <= orphanage->MaxGlobalLatencyScore());

    auto last_tx = MakeTransactionBulkedTo(0, max_usage - total_usage + 1, det_rand);

    bench.epochs(1).epochIterations(1).run([&]() NO_THREAD_SAFETY_ANALYSIS {
        const auto num_announcements_before_trim{orphanage->CountAnnouncements()};
        // There is a small gap between the total usage and the max usage. Add a transaction to fill it.
        assert(orphanage->AddTx(last_tx, 0));
        orphanage->LimitOrphans();

        // If there are multiple peers, note that they all have the same DoS score. We will evict only 1 item at a time for each new DoSiest peer.
        const auto num_evicted{num_announcements_before_trim - orphanage->CountAnnouncements() + 1};
        // The trimming happens as a round robin. In the first NUM_UNIQUE_TXNS - 2 rounds for each peer, only duplicates are evicted.
        // Once each peer has 2 transactions left, it's possible to select a peer whose oldest transaction is unique.
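        // With the constants above (and assuming the current default latency score limit of 3'000), that
        // lower bound works out to (76 - 2) * 39 = 2'886 evictions.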
        assert(num_evicted >= (NUM_UNIQUE_TXNS - 2) * NUM_PEERS);
    });
}

static void OrphanageEraseAll(benchmark::Bench& bench, bool block_or_disconnect)
{
    FastRandomContext det_rand{true};
    const auto orphanage{node::MakeTxOrphanage(/*max_global_ann=*/node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE, /*reserved_peer_usage=*/node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER)};
    // This is an unrealistically large number of inputs for a block, as there is almost no room given to witness data,
    // outputs, and overhead for individual transactions. The entire block is 1 transaction with 20,000 inputs.
    constexpr unsigned int NUM_BLOCK_INPUTS{MAX_BLOCK_WEIGHT / APPROX_WEIGHT_PER_INPUT};
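    // With the 4'000'000 WU block weight limit this works out to 4'000'000 / 200 = 20'000 inputs.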
    const auto block_tx{MakeTransactionBulkedTo(NUM_BLOCK_INPUTS, MAX_BLOCK_WEIGHT - 4000, det_rand)};
    CBlock block;
    block.vtx.push_back(block_tx);

    // Transactions with 9 inputs maximize the computation / LatencyScore ratio.
    constexpr unsigned int INPUTS_PER_TX{9};
    constexpr unsigned int NUM_PEERS{125};
    constexpr unsigned int NUM_TXNS_PER_PEER = node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE / NUM_PEERS;
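    // With the default latency score limit at the time of writing (3'000 announcements), each peer gets
    // 3'000 / 125 = 24 transactions.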

    // Divide the block's inputs evenly among the peers.
    constexpr unsigned int INPUTS_PER_PEER = NUM_BLOCK_INPUTS / NUM_PEERS;
    static_assert(INPUTS_PER_PEER > 0);
    // All the block inputs are spent by the orphanage transactions. Each peer is assigned INPUTS_PER_PEER (160) of them.
    // Each peer has 24 transactions spending 9 inputs each, and consecutive transactions start 7 inputs apart, so we cover all of the peer's inputs.
    static_assert(7 * NUM_TXNS_PER_PEER + INPUTS_PER_TX - 1 >= INPUTS_PER_PEER);

    for (NodeId peer{0}; peer < NUM_PEERS; ++peer) {
        int64_t weight_left_for_peer{node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER};
        for (unsigned int txnum{0}; txnum < NUM_TXNS_PER_PEER; ++txnum) {
            // Transactions must be unique since they use different (though overlapping) inputs.
            const unsigned int start_input = peer * INPUTS_PER_PEER + txnum * 7;

            // Note that we shouldn't be able to hit the weight limit with these small transactions.
            const int64_t weight_limit{std::min<int64_t>(weight_left_for_peer, MAX_STANDARD_TX_WEIGHT)};
            auto ptx = MakeTransactionSpendingUpTo(block_tx->vin, /*start_input=*/start_input, /*num_inputs=*/INPUTS_PER_TX, /*weight_limit=*/weight_limit);

            assert(GetTransactionWeight(*ptx) <= MAX_STANDARD_TX_WEIGHT);
            assert(!orphanage->HaveTx(ptx->GetWitnessHash()));
            assert(orphanage->AddTx(ptx, peer));

            weight_left_for_peer -= GetTransactionWeight(*ptx);
            if (weight_left_for_peer < TINY_TX_WEIGHT * 2) break;
        }
    }

    // If these fail, it means this benchmark is not realistic because the orphanage would have been trimmed already.
    assert(orphanage->TotalLatencyScore() <= orphanage->MaxGlobalLatencyScore());
    assert(orphanage->TotalOrphanUsage() <= orphanage->MaxGlobalUsage());

    // 3000 announcements (and unique transactions) in the orphanage.
    // They spend a total of 27,000 inputs (20,000 unique ones).
    assert(orphanage->CountAnnouncements() == NUM_PEERS * NUM_TXNS_PER_PEER);
    assert(orphanage->TotalLatencyScore() == orphanage->CountAnnouncements());

    bench.epochs(1).epochIterations(1).run([&]() NO_THREAD_SAFETY_ANALYSIS {
        if (block_or_disconnect) {
            // Erase everything through EraseForBlock.
            // Every tx conflicts with this block.
            orphanage->EraseForBlock(block);
            assert(orphanage->CountAnnouncements() == 0);
        } else {
            // Erase everything through EraseForPeer.
            for (NodeId peer{0}; peer < NUM_PEERS; ++peer) {
                orphanage->EraseForPeer(peer);
            }
            assert(orphanage->CountAnnouncements() == 0);
        }
    });
}

static void OrphanageEraseForBlock(benchmark::Bench& bench)
{
    OrphanageEraseAll(bench, /*block_or_disconnect=*/true);
}

static void OrphanageEraseForPeer(benchmark::Bench& bench)
{
    OrphanageEraseAll(bench, /*block_or_disconnect=*/false);
}

BENCHMARK(OrphanageSinglePeerEviction, benchmark::PriorityLevel::LOW);
BENCHMARK(OrphanageMultiPeerEviction, benchmark::PriorityLevel::LOW);
BENCHMARK(OrphanageEraseForBlock, benchmark::PriorityLevel::LOW);
BENCHMARK(OrphanageEraseForPeer, benchmark::PriorityLevel::LOW);