@@ -4553,7 +4553,7 @@ where
 		for htlc in sources.drain(..) {
 			if let Err((pk, err)) = self.claim_funds_from_hop(
 				htlc.prev_hop, payment_preimage,
-				|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
+				|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }), false)
 			{
 				if let msgs::ErrorAction::IgnoreError = err.err.action {
 					// We got a temporary failure updating monitor, but will claim the
@@ -4583,7 +4583,7 @@ where
 	}
 
 	fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
-		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
+		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc, during_init: bool)
 	-> Result<(), (PublicKey, MsgHandleErrInternal)> {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
@@ -4613,16 +4613,28 @@ where
 						log_bytes!(chan_id), action);
 					peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 				}
-				let update_id = monitor_update.update_id;
-				let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
-				let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-					peer_state, per_peer_state, chan);
-				if let Err(e) = res {
-					// TODO: This is a *critical* error - we probably updated the outbound edge
-					// of the HTLC's monitor with a preimage. We should retry this monitor
-					// update over and over again until morale improves.
-					log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-					return Err((counterparty_node_id, e));
+				if !during_init {
+					let update_id = monitor_update.update_id;
+					let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+					let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+						peer_state, per_peer_state, chan);
+					if let Err(e) = res {
+						// TODO: This is a *critical* error - we probably updated the outbound edge
+						// of the HTLC's monitor with a preimage. We should retry this monitor
+						// update over and over again until morale improves.
+						log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+						return Err((counterparty_node_id, e));
+					}
+				} else {
+					// If we're running during init we cannot update a monitor directly -
+					// they probably haven't actually been loaded yet. Instead, push the
+					// monitor update as a background event.
+					self.pending_background_events.lock().unwrap().push(
+						BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+							counterparty_node_id,
+							funding_txo: prev_hop.outpoint,
+							update: monitor_update.clone(),
+						});
 				}
 			}
 			return Ok(());
@@ -4635,16 +4647,34 @@ where
 				payment_preimage,
 			}],
 		};
-		// We update the ChannelMonitor on the backward link, after
-		// receiving an `update_fulfill_htlc` from the forward link.
-		let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
-		if update_res != ChannelMonitorUpdateStatus::Completed {
-			// TODO: This needs to be handled somehow - if we receive a monitor update
-			// with a preimage we *must* somehow manage to propagate it to the upstream
-			// channel, or we must have an ability to receive the same event and try
-			// again on restart.
-			log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-				payment_preimage, update_res);
+
+		if !during_init {
+			// We update the ChannelMonitor on the backward link, after
+			// receiving an `update_fulfill_htlc` from the forward link.
+			let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+			if update_res != ChannelMonitorUpdateStatus::Completed {
+				// TODO: This needs to be handled somehow - if we receive a monitor update
+				// with a preimage we *must* somehow manage to propagate it to the upstream
+				// channel, or we must have an ability to receive the same event and try
+				// again on restart.
+				log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+					payment_preimage, update_res);
+			}
+		} else {
+			// If we're running during init we cannot update a monitor directly - they probably
+			// haven't actually been loaded yet. Instead, push the monitor update as a background
+			// event.
+			// Note that while it's safe to use `ClosingMonitorUpdateRegeneratedOnStartup` here (the
+			// channel is already closed) we need to ultimately handle the monitor update
+			// completion action only after we've completed the monitor update. This is the only
+			// way to guarantee this update *will* be regenerated on startup (otherwise if this was
+			// from a forwarded HTLC the downstream preimage may be deleted before we claim
+			// upstream). Thus, we need to transition to some new `BackgroundEvent` type which will
+			// complete the monitor update completion action from `completion_action`.
+			self.pending_background_events.lock().unwrap().push(
+				BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((
+					prev_hop.outpoint, preimage_update,
+				)));
 		}
 		// Note that we do process the completion action here. This totally could be a
 		// duplicate claim, but we have no way of knowing without interrogating the
@@ -4659,9 +4689,10 @@ where
 		self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
 	}
 
-	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32], during_init: bool) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+				debug_assert!(!during_init);
 				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
@@ -4684,7 +4715,7 @@ where
 							downstream_counterparty_and_funding_outpoint: None,
 						})
 					} else { None }
-				});
+				}, during_init);
 			if let Err((pk, err)) = res {
 				let result: Result<(), _> = Err(err);
 				let _ = handle_error!(self, result, pk);
@@ -5416,7 +5447,7 @@ where
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
+		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id, false);
 		Ok(())
 	}
 
@@ -5790,7 +5821,7 @@ where
 					MonitorEvent::HTLCEvent(htlc_update) => {
 						if let Some(preimage) = htlc_update.payment_preimage {
 							log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-							self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+							self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id(), false);
 						} else {
 							log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@@ -8295,6 +8326,11 @@ where
 			retry_lock: Mutex::new(())
 		};
 
+		// If there are any preimages for forwarded HTLCs hanging around in ChannelMonitors we
+		// should ensure we try them again on the inbound edge. We put them here and do so after we
+		// have a fully-constructed `ChannelManager` at the end.
+		let mut pending_claims_to_replay = Vec::new();
+
 		{
 			// If we're tracking pending payments, ensure we haven't lost any by looking at the
 			// ChannelMonitor data for any channels for which we do not have authoritative state
@@ -8305,7 +8341,8 @@ where
 			// We only rebuild the pending payments map if we were most recently serialized by
 			// 0.0.102+
 			for (_, monitor) in args.channel_monitors.iter() {
-				if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
+				let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+				if counterparty_opt.is_none() {
 					for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
 						if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
 							if path.hops.is_empty() {
@@ -8399,6 +8436,30 @@ where
 							}
 						}
 					}
+
+				// Whether the downstream channel was closed or not, try to re-apply any payment
+				// preimages from it which may be needed in upstream channels for forwarded
+				// payments.
+				let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
+					.into_iter()
+					.filter_map(|(htlc_source, (htlc, preimage_opt))| {
+						if let HTLCSource::PreviousHopData(_) = htlc_source {
+							if let Some(payment_preimage) = preimage_opt {
+								Some((htlc_source, payment_preimage, htlc.amount_msat,
+									counterparty_opt.is_none(), // i.e. the downstream chan is closed
+									monitor.get_funding_txo().0.to_channel_id()))
+							} else { None }
+						} else {
+							// If it was an outbound payment, we've handled it above - if a preimage
+							// came in and we persisted the `ChannelManager` we either handled it and
+							// are good to go or the channel force-closed - we don't have to handle the
+							// channel still live case here.
+							None
+						}
+					});
+				for tuple in outbound_claimed_htlcs_iter {
+					pending_claims_to_replay.push(tuple);
+				}
 			}
 		}
 
@@ -8650,6 +8711,11 @@ where
 			channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
 
+		for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
+			channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
+				downstream_closed, downstream_chan_id, true);
+		}
+
 		//TODO: Broadcast channel update for closed channels, but only after we've made a
 		//connection or two.
 
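For readers skimming the diff, a minimal, self-contained sketch of the gating pattern it introduces: when `during_init` is false the monitor update is applied immediately, and when it is true (i.e. while deserializing the `ChannelManager`, before monitors are available) the update is queued as a background event to be replayed afterwards. The types below (`MonitorUpdate`, `BackgroundEvent`, `Manager`) are simplified stand-ins for illustration only, not LDK's actual API.

// Simplified sketch only: illustrative stand-ins, not LDK's real types.
use std::sync::Mutex;

#[derive(Clone, Debug)]
struct MonitorUpdate { update_id: u64 }

#[derive(Debug)]
enum BackgroundEvent {
	MonitorUpdateRegeneratedOnStartup { update: MonitorUpdate },
}

struct Manager {
	pending_background_events: Mutex<Vec<BackgroundEvent>>,
}

impl Manager {
	// Mirrors the gating added to `claim_funds_from_hop`: apply now, or queue for startup replay.
	fn apply_or_queue(&self, update: MonitorUpdate, during_init: bool) {
		if !during_init {
			// Normal operation: the chain monitor is available, apply the update directly.
			println!("applying monitor update {}", update.update_id);
		} else {
			// During `ChannelManager::read` the monitors may not be loaded yet, so push the
			// update as a background event and replay it once startup completes.
			self.pending_background_events.lock().unwrap()
				.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { update });
		}
	}
}

fn main() {
	let mgr = Manager { pending_background_events: Mutex::new(Vec::new()) };
	mgr.apply_or_queue(MonitorUpdate { update_id: 1 }, false); // applied immediately
	mgr.apply_or_queue(MonitorUpdate { update_id: 2 }, true);  // queued for replay on startup
	assert_eq!(mgr.pending_background_events.lock().unwrap().len(), 1);
}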