@@ -509,14 +509,18 @@ enum BackgroundEvent {
 	/// Note that any such events are lost on shutdown, so in general they must be updates which
 	/// are regenerated on startup.
 	ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
-	/// Handle a ChannelMonitorUpdate.
+	/// Handle a ChannelMonitorUpdate which may or may not close the channel. In general this
+	/// should be used rather than [`Self::ClosingMonitorUpdateRegeneratedOnStartup`], however in
+	/// cases where the `counterparty_node_id` is not available as the channel has closed from a
+	/// [`ChannelMonitor`] error the other variant is acceptable.
 	///
 	/// Note that any such events are lost on shutdown, so in general they must be updates which
 	/// are regenerated on startup.
-	MonitorUpdateRegeneratedOnStartup((
-		/// The counterparty node_id for the channel being updated.
-		PublicKey,
-		OutPoint, ChannelMonitorUpdate)),
+	MonitorUpdateRegeneratedOnStartup {
+		counterparty_node_id: PublicKey,
+		funding_txo: OutPoint,
+		update: ChannelMonitorUpdate
+	},
 }
 
 #[derive(Debug)]
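
To make the shape of the reworked variant easier to follow, here is a minimal, self-contained sketch of how the new struct variant is constructed and matched. It is not part of the patch, and the payload types below are simple placeholders rather than LDK's real `PublicKey`, `OutPoint` and `ChannelMonitorUpdate`.

// Placeholder payload types, standing in for the real LDK types.
#[derive(Debug)]
struct PublicKey(u8);
#[derive(Debug)]
struct OutPoint(u8);
#[derive(Debug)]
struct ChannelMonitorUpdate(u8);

#[derive(Debug)]
enum BackgroundEvent {
	// Used only when the counterparty's node_id is unavailable, e.g. because the
	// channel was closed due to a `ChannelMonitor` error.
	ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
	// Preferred variant: named fields instead of a positional tuple.
	MonitorUpdateRegeneratedOnStartup {
		counterparty_node_id: PublicKey,
		funding_txo: OutPoint,
		update: ChannelMonitorUpdate,
	},
}

fn main() {
	let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
		counterparty_node_id: PublicKey(1),
		funding_txo: OutPoint(2),
		update: ChannelMonitorUpdate(3),
	};
	match event {
		// Call sites that do not need the counterparty can drop it with `..`,
		// as the `process_background_events` hunk below does.
		BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update, .. } =>
			println!("apply {:?} to {:?}", update, funding_txo),
		BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) =>
			println!("apply {:?} to closed {:?}", update, funding_txo),
	}
}

The named fields are what allow the `..` pattern at call sites that only care about the funding outpoint and the update itself, as seen in the next hunk.
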
@@ -3792,7 +3796,7 @@ where
 					// monitor updating completing.
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
-				BackgroundEvent::MonitorUpdateRegeneratedOnStartup((_, funding_txo, update)) => {
+				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update, .. } => {
 					// The channel has already been closed, so no use bothering to care about the
 					// monitor updating completing.
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -5713,8 +5717,9 @@ where
 					assert!(should_broadcast);
 				} else { unreachable!(); }
 				self.pending_background_events.lock().unwrap().push(
-					BackgroundEvent::MonitorUpdateRegeneratedOnStartup(
-						(counterparty_node_id, funding_txo, update)));
+					BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+						counterparty_node_id, funding_txo, update
+					});
 			}
 			self.finish_force_close_channel(failure);
 		}
@@ -7787,8 +7792,10 @@ where
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
 						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 					let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
-					if let Some(monitor_update) = monitor_update {
-						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
+					if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+							counterparty_node_id, funding_txo, update
+						});
 					}
 					failed_htlcs.append(&mut new_failed_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {