@@ -9843,6 +9843,198 @@ fn test_keysend_payments_to_private_node() {
 	claim_payment(&nodes[0], &path, test_preimage);
 }
 
+fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
+	// Test what happens if a node receives an MPP payment, claims it, but crashes before
+	// persisting the ChannelManager. If `persist_both_monitors` is false, also crash after
+	// updating only one of the two channels' ChannelMonitors. As a result, on startup, we'll (a)
+	// still have the PaymentReceived event, (b) have one (or two) channel(s) that go on chain
+	// with the HTLC preimage in them, and (c) optionally have one channel that is live off-chain
+	// but does not have the preimage tied to the still-pending HTLC.
+	//
+	// To get to the correct state, on startup we should propagate the preimage to the
+	// still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor
+	// receiving the preimage without a state update.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_3_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+	let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
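+	// nodes[3] now has two inbound channels. If `persist_both_monitors` is false, only the
+	// nodes[1] channel's ChannelMonitor will be persisted post-claim below.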
+
+	// Create an MPP route for 15k sats, more than the default htlc-max of 10%
+	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+	assert_eq!(route.paths.len(), 2);
+	route.paths.sort_by(|path_a, _| {
+		// Sort the path so that the path through nodes[1] comes first
+		if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+			core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+	});
+
+	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+	check_added_monitors!(nodes[0], 2);
+
+	// Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(send_events.len(), 2);
+	do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
+	do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
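+	// nodes[3] has now received both HTLC parts; we deliberately leave its PaymentReceived event
+	// pending so that the ChannelManager copy serialized below still contains it.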
+
+	// Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
+	// monitors and ChannelManager, for use later, if we don't want to persist both monitors.
+	let mut original_monitor = test_utils::TestVecWriter(Vec::new());
+	if !persist_both_monitors {
+		for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+			if outpoint.to_channel_id() == chan_id_not_persisted {
+				assert!(original_monitor.0.is_empty());
+				nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+			}
+		}
+	}
+
+	let mut original_manager = test_utils::TestVecWriter(Vec::new());
+	nodes[3].node.write(&mut original_manager).unwrap();
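+	// This ChannelManager copy predates the claim, so a node restarted from it will still
+	// consider both HTLCs pending.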
+
+	expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+
+	nodes[3].node.claim_funds(payment_preimage);
+	check_added_monitors!(nodes[3], 2);
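+	// The claim updates both channels' ChannelMonitors with the preimage, one update per
+	// HTLC part of the MPP payment.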
+
+	// Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
+	// crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
+	// with the old ChannelManager.
+	let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
+	for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+		if outpoint.to_channel_id() == chan_id_persisted {
+			assert!(updated_monitor.0.is_empty());
+			nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
+		}
+	}
+	// If `persist_both_monitors` is set, get the second monitor here as well
+	if persist_both_monitors {
+		for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+			if outpoint.to_channel_id() == chan_id_not_persisted {
+				assert!(original_monitor.0.is_empty());
+				nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+			}
+		}
+	}
+
+	// Now restart nodes[3].
+	persister = test_utils::TestPersister::new();
+	let keys_manager = &chanmon_cfgs[3].keys_manager;
+	new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[3].chain_source), nodes[3].tx_broadcaster.clone(), nodes[3].logger, node_cfgs[3].fee_estimator, &persister, keys_manager);
+	nodes[3].chain_monitor = &new_chain_monitor;
+	let mut monitors = Vec::new();
+	for mut monitor_data in [original_monitor, updated_monitor].iter() {
+		let (_, mut deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut &monitor_data.0[..], keys_manager).unwrap();
+		monitors.push(deserialized_monitor);
+	}
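+	// In the `!persist_both_monitors` case, the first monitor deserialized here is the stale
+	// pre-claim copy and only the second one already knows the payment preimage.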
+
+	let config = UserConfig::default();
+	nodes_3_deserialized = {
+		let mut channel_monitors = HashMap::new();
+		for monitor in monitors.iter_mut() {
+			channel_monitors.insert(monitor.get_funding_txo().0, monitor);
+		}
+		<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut &original_manager.0[..], ChannelManagerReadArgs {
+			default_config: config,
+			keys_manager,
+			fee_estimator: node_cfgs[3].fee_estimator,
+			chain_monitor: nodes[3].chain_monitor,
+			tx_broadcaster: nodes[3].tx_broadcaster.clone(),
+			logger: nodes[3].logger,
+			channel_monitors,
+		}).unwrap().1
+	};
+	nodes[3].node = &nodes_3_deserialized;
+
+	for monitor in monitors {
+		// On startup the preimage should have been copied into the non-persisted monitor:
+		assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
+		nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap();
+	}
+	check_added_monitors!(nodes[3], 2);
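+	// Each watch_channel call above registers one monitor with the new TestChainMonitor.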
+
+	nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+
+	// During deserialization, we should have closed one channel and broadcast its latest
+	// commitment transaction. We should also still have the original PaymentReceived event we
+	// never finished processing.
+	let events = nodes[3].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), if persist_both_monitors { 3 } else { 2 });
+	if let Event::PaymentReceived { amt: 15_000_000, .. } = events[0] { } else { panic!(); }
+	if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
+	if persist_both_monitors {
+		if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
+	}
+
+	assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 });
+	if !persist_both_monitors {
+		// If one of the two channels is still live, reveal the payment preimage over it.
+
+		nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+		let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
+		nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+		let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
+
+		nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
+		get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
+		assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+		nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]);
+
+		// Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
+		// claim should fly.
+		let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+		check_added_monitors!(nodes[3], 1);
+		assert_eq!(ds_msgs.len(), 2);
+		if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+
+		let cs_updates = match ds_msgs[0] {
+			MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
+				nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+				check_added_monitors!(nodes[2], 1);
+				let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+				expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+				commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+				cs_updates
+			}
+			_ => panic!(),
+		};
+
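+		// Finally, relay the fulfill to nodes[0] and confirm the sender sees the payment as sent.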
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
+}
+
+#[test]
+fn test_partial_claim_before_restart() {
+	do_test_partial_claim_before_restart(false);
+	do_test_partial_claim_before_restart(true);
+}
+
 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
 #[derive(Clone, Copy, PartialEq)]
 enum ExposureEvent {