
Commit 162821e

Update tests to test re-claiming of forwarded HTLCs on startup
1 parent: 6696302 · commit: 162821e

File tree

4 files changed: +207 −39 lines changed

lightning/src/chain/channelmonitor.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs

lightning/src/chain/channelmonitor.rs

Lines changed: 2 additions & 2 deletions
@@ -66,7 +66,7 @@ use crate::sync::{Mutex, LockTestExt};
 /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
 /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
 /// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
 	pub(crate) updates: Vec<ChannelMonitorUpdateStep>,

@@ -486,7 +486,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,

 );

-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum ChannelMonitorUpdateStep {
 	LatestHolderCommitmentTXInfo {
 		commitment_tx: HolderCommitmentTransaction,
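
Aside (illustrative, not part of this commit): deriving `Debug` on `ChannelMonitorUpdate` and `ChannelMonitorUpdateStep` is presumably what lets test code print these values and assert on them with readable failure output. A minimal, self-contained Rust sketch of the effect, using a hypothetical stand-in type rather than the real LDK structs:

// Illustrative only: `assert_eq!` and `{:?}` need `Debug` on the type to format it,
// so adding the derive makes equality failures in tests print both operands.
#[derive(Clone, Debug, PartialEq, Eq)]
struct ExampleUpdate { update_id: u64 }

fn main() {
	let a = ExampleUpdate { update_id: 1 };
	let b = ExampleUpdate { update_id: 1 };
	assert_eq!(a, b);    // on failure this would print both values via `Debug`
	println!("{:?}", a); // ExampleUpdate { update_id: 1 }
}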

lightning/src/ln/chan_utils.rs

Lines changed: 4 additions & 4 deletions
@@ -448,7 +448,7 @@ pub fn derive_public_revocation_key<T: secp256k1::Verification>(secp_ctx: &Secp2
 /// channel basepoints via the new function, or they were obtained via
 /// CommitmentTransaction.trust().keys() because we trusted the source of the
 /// pre-calculated keys.
-#[derive(PartialEq, Eq, Clone)]
+#[derive(PartialEq, Eq, Clone, Debug)]
 pub struct TxCreationKeys {
 	/// The broadcaster's per-commitment public key which was used to derive the other keys.
 	pub per_commitment_point: PublicKey,

@@ -1026,7 +1026,7 @@ impl<'a> DirectedChannelTransactionParameters<'a> {
 /// Information needed to build and sign a holder's commitment transaction.
 ///
 /// The transaction is only signed once we are ready to broadcast.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct HolderCommitmentTransaction {
 	inner: CommitmentTransaction,
 	/// Our counterparty's signature for the transaction

@@ -1132,7 +1132,7 @@ impl HolderCommitmentTransaction {
 }

 /// A pre-built Bitcoin commitment transaction and its txid.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct BuiltCommitmentTransaction {
 	/// The commitment transaction
 	pub transaction: Transaction,

@@ -1303,7 +1303,7 @@ impl<'a> TrustedClosingTransaction<'a> {
 ///
 /// This class can be used inside a signer implementation to generate a signature given the relevant
 /// secret key.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct CommitmentTransaction {
 	commitment_number: u64,
 	to_broadcaster_value_sat: u64,
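
Aside (illustrative, not part of this commit): `#[derive(Debug)]` only compiles if every field's type is itself `Debug`, which is presumably why the derive is added to the whole chain of commitment-transaction types here (the diff shows `HolderCommitmentTransaction` holding a `CommitmentTransaction` as its `inner` field). A small sketch with hypothetical types:

// Illustrative only: the derive on `Outer` compiles only because `Inner` also derives `Debug`;
// dropping the inner derive would break the outer one.
#[derive(Clone, Debug)]
struct Inner { commitment_number: u64 }

#[derive(Clone, Debug)]
struct Outer { inner: Inner }

fn main() {
	println!("{:?}", Outer { inner: Inner { commitment_number: 42 } });
}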

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 198 additions & 30 deletions
@@ -3086,7 +3086,7 @@ fn test_blocked_chan_preimage_release() {
 	expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }

-fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
+fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
 	// When we forward a payment and receive an `update_fulfill_htlc` message from the downstream
 	// channel, we immediately claim the HTLC on the upstream channel, before even doing a
 	// `commitment_signed` dance on the downstream channel. This implies that our
@@ -3114,6 +3114,10 @@ fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
 	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);

 	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+	let mut manager_b = Vec::new();
+	if !with_latest_manager {
+		manager_b = nodes[1].node.encode();
+	}

 	nodes[2].node.claim_funds(payment_preimage);
 	check_added_monitors(&nodes[2], 1);
@@ -3150,58 +3154,222 @@ fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
 	}

 	// Now reload node B
-	let manager_b = nodes[1].node.encode();
+	if with_latest_manager {
+		manager_b = nodes[1].node.encode();
+	}

 	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
 	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);

 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

-	// If we used the latest ChannelManager to reload from, we should have both channels still
-	// live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
-	// before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
-	// When we call `timer_tick_occurred` we will get that monitor update back, which we'll
-	// complete after reconnecting to our peers.
-	persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-	nodes[1].node.timer_tick_occurred();
-	check_added_monitors(&nodes[1], 1);
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	if with_latest_manager {
+		// If we used the latest ChannelManager to reload from, we should have both channels still
+		// live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
+		// before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
+		// When we call `timer_tick_occurred` we will get that monitor update back, which we'll
+		// complete after reconnecting to our peers.
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		nodes[1].node.timer_tick_occurred();
+		check_added_monitors(&nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

-	// Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
-	// the end go ahead and do that, though the -2 in `reconnect_nodes` indicates that we
-	// expect to *not* receive the final RAA ChannelMonitorUpdate.
-	if complete_bc_commitment_dance {
-		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		// Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
+		// the end go ahead and do that, though the -2 in `reconnect_nodes` indicates that we
+		// expect to *not* receive the final RAA ChannelMonitorUpdate.
+		if complete_bc_commitment_dance {
+			reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		} else {
+			reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, -2), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+		}
+
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+		// (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
+		// disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
+		// process.
+		let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+		// When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
+		// completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+		// channel.
 	} else {
-		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, -2), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
-	}
+		// If the ChannelManager used in the reload was stale, check that the B <-> C channel was
+		// closed.
+		//
+		// Note that this will also process the ChannelMonitorUpdates which were queued up when we
+		// reloaded the ChannelManager. This will re-emit the A<->B preimage as well as the B<->C
+		// force-closure ChannelMonitorUpdate. Once the A<->B preimage update completes, the claim
+		// commitment update will be allowed to go out.
+		check_added_monitors(&nodes[1], 0);
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false);
+		check_added_monitors(&nodes[1], 2);
+
+		nodes[1].node.timer_tick_occurred();
+		check_added_monitors(&nodes[1], 0);
+
+		// Don't bother to reconnect B to C - that channel has been closed. We don't need to
+		// exchange any messages here even though there's a pending commitment update because the
+		// ChannelMonitorUpdate hasn't yet completed.
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();

-	// (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
-	// disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
-	// process.
-	let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
-	nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+		// The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
+		// preimage (as it was a replay of the original ChannelMonitorUpdate from before we
+		// restarted). When we go to fetch the commitment transaction updates we'll poll the
+		// ChannelMonitorUpdate completion, then generate (and complete) a new ChannelMonitorUpdate
+		// with the actual commitment transaction, which will allow us to fulfill the HTLC with
+		// node A.
+	}

-	// When we fetch B's HTLC update messages here (now that the ChannelMonitorUpdate has
-	// completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
-	// channel.
 	let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
 	check_added_monitors(&nodes[1], 1);

 	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
 	do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);

-	expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, false);
+	expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);

 	// Finally, check that the payment was, ultimately, seen as sent by node A.
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
 }

 #[test]
 fn test_inverted_mon_completion_order() {
-	do_test_inverted_mon_completion_order(true);
-	do_test_inverted_mon_completion_order(false);
+	do_test_inverted_mon_completion_order(true, true);
+	do_test_inverted_mon_completion_order(true, false);
+	do_test_inverted_mon_completion_order(false, true);
+	do_test_inverted_mon_completion_order(false, false);
+}
+
+fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool) {
+	// Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
+	// is force-closed between when we generate the update on reload and when we go to handle the
+	// update or prior to generating the update at all.
+
+	if !close_chans_before_reload && close_only_a {
+		// If we're not closing, it makes no sense to "only close A"
+		panic!();
+	}
+
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_1_deserialized;
+
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+	// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+	// `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+	// on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+
+	nodes[2].node.claim_funds(payment_preimage);
+	check_added_monitors(&nodes[2], 1);
+	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+	// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+	// for it since the monitor update is marked in-progress.
+	check_added_monitors(&nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
+	// the preimage when the nodes reconnect, at which point we have to ensure we get it from the
+	// ChannelMonitor.
+	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+	check_added_monitors(&nodes[1], 1);
+	let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+
+	if close_chans_before_reload {
+		if !close_only_a {
+			chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+			nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
+			check_closed_broadcast(&nodes[1], 1, true);
+			check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false);
+		}
+
+		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
+		check_closed_broadcast(&nodes[1], 1, true);
+		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false);
+	}
+
+	// Now reload node B
+	let manager_b = nodes[1].node.encode();
+	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+	if close_chans_before_reload {
+		// If the channels were already closed, B will rebroadcast its closing transactions here.
+		let bs_close_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		if close_only_a {
+			assert_eq!(bs_close_txn.len(), 2);
+		} else {
+			assert_eq!(bs_close_txn.len(), 3);
+		}
+	}
+
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false);
+	let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_closing_tx.len(), 1);
+
+	// In order to give B A's closing transaction without processing background events first, use
+	// the _without_checks utility method. This is similar to connecting blocks during startup
+	// prior to the node being full initialized.
+	mine_transaction_without_checks(&nodes[1], &as_closing_tx[0]);
+
+	// After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
+	// ChannelMonitor (possible twice), even though the channel has since been closed.
+	check_added_monitors(&nodes[1], 0);
+	nodes[1].node.timer_tick_occurred();
+	check_added_monitors(&nodes[1], if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 });
+
+	// Finally, check that B created a payment preimage transaction and close out the payment.
+	let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
+	let bs_preimage_tx = &bs_txn[0];
+	check_spends!(bs_preimage_tx, as_closing_tx[0]);
+
+	if !close_chans_before_reload {
+		check_closed_broadcast(&nodes[1], 1, true);
+		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false);
+	} else {
+		// While we forwarded the payment a while ago, we don't want to process events too early or
+		// we'll run background tasks we wanted to test individually.
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, !close_only_a);
+	}
+
+	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
+	check_closed_broadcast(&nodes[0], 1, true);
+	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
+#[test]
+fn test_durable_preimages_on_closed_channel() {
+	do_test_durable_preimages_on_closed_channel(true, true);
+	do_test_durable_preimages_on_closed_channel(true, false);
+	do_test_durable_preimages_on_closed_channel(false, false);
 }
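
Aside (illustrative, not part of this commit): the new tests follow the file's existing pattern of a `do_test_*` helper parameterized by `bool` flags plus a single `#[test]` wrapper that enumerates the flag combinations worth covering. Distilled to its shape, with hypothetical names:

// Illustrative only: one helper drives the scenario for a given flag combination,
// and the wrapper runs each supported combination as part of one named test.
fn do_test_example(close_before_reload: bool, close_only_a: bool) {
	// ... set up nodes, optionally force-close per the flags, reload node B,
	// then assert the preimage ChannelMonitorUpdate is replayed on startup ...
	assert!(close_before_reload || !close_only_a, "closing only A requires closing at all");
}

#[test]
fn test_example() {
	do_test_example(true, true);
	do_test_example(true, false);
	do_test_example(false, false);
}

Such a test can then be run on its own, e.g. by filtering on the test name with `cargo test`.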

lightning/src/ln/channelmanager.rs

Lines changed: 3 additions & 3 deletions
@@ -173,7 +173,7 @@ pub(super) enum HTLCForwardInfo {
 }

 /// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub(crate) struct HTLCPreviousHopData {
 	// Note that this may be an outbound SCID alias for the associated channel.
 	short_channel_id: u64,

@@ -255,7 +255,7 @@ impl Readable for InterceptId {
 	}
 }

-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 /// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
 pub(crate) enum SentHTLCId {
 	PreviousHopData { short_channel_id: u64, htlc_id: u64 },

@@ -286,7 +286,7 @@ impl_writeable_tlv_based_enum!(SentHTLCId,

 /// Tracks the inbound corresponding to an outbound HTLC
 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum HTLCSource {
 	PreviousHopData(HTLCPreviousHopData),
 	OutboundRoute {
