@@ -3339,8 +3339,22 @@ macro_rules! emit_initial_channel_ready_event {
 	};
 }
 
+/// Handles the completion steps for when a [`ChannelMonitorUpdate`] is applied to a live channel.
+///
+/// You should not add new direct calls to this, generally, rather rely on
+/// `handle_new_monitor_update` or [`ChannelManager::channel_monitor_updated`] to call it for you.
+///
+/// Requires that `$chan.blocked_monitor_updates_pending() == 0` and the in-flight monitor update
+/// set for this channel is empty!
 macro_rules! handle_monitor_update_completion {
 	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+		#[cfg(debug_assertions)]
+		{
+			let in_flight_updates =
+				$peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id());
+			assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
+			assert_eq!($chan.blocked_monitor_updates_pending(), 0);
+		}
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let mut updates = $chan.monitor_updating_restored(&&logger,
 			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
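
The new doc comment spells out the macro's contract: the completion steps may only run once no monitor updates are in flight and none are blocked for the channel, which is exactly what the `#[cfg(debug_assertions)]` block now asserts. A minimal standalone sketch of that gating check, using hypothetical simplified types rather than LDK's actual `PeerState`/`Channel` structs, might look like this:

```rust
use std::collections::HashMap;

/// Hypothetical, simplified stand-ins for the peer/channel state used above.
struct PeerState {
    /// channel id -> queue of monitor update ids still in flight (oldest first).
    in_flight_monitor_updates: HashMap<u64, Vec<u64>>,
}

struct Channel {
    blocked_monitor_updates: usize,
}

impl Channel {
    fn blocked_monitor_updates_pending(&self) -> usize {
        self.blocked_monitor_updates
    }
}

/// Mirrors the precondition the macro asserts: only run the completion steps
/// once nothing is in flight and nothing is blocked for this channel.
fn ready_for_completion(peer_state: &PeerState, chan_id: u64, chan: &Channel) -> bool {
    let in_flight_empty = peer_state
        .in_flight_monitor_updates
        .get(&chan_id)
        .map(|updates| updates.is_empty())
        .unwrap_or(true);
    in_flight_empty && chan.blocked_monitor_updates_pending() == 0
}

fn main() {
    let peer_state = PeerState { in_flight_monitor_updates: HashMap::new() };
    let chan = Channel { blocked_monitor_updates: 0 };
    assert!(ready_for_completion(&peer_state, 42, &chan));
}
```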
@@ -4258,19 +4272,7 @@ where
 			// TODO: If we do the `in_flight_monitor_updates.is_empty()` check in
 			// `locked_close_channel` we can skip the locks here.
 			if shutdown_res.channel_funding_txo.is_some() {
-				let per_peer_state = self.per_peer_state.read().unwrap();
-				if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
-					let mut peer_state = peer_state_mtx.lock().unwrap();
-					if peer_state.in_flight_monitor_updates.get(&shutdown_res.channel_id).map(|(_, updates)| updates.is_empty()).unwrap_or(true) {
-						let update_actions = peer_state.monitor_update_blocked_actions
-							.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());
-
-						mem::drop(peer_state);
-						mem::drop(per_peer_state);
-
-						self.handle_monitor_update_completion_actions(update_actions);
-					}
-				}
+				self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id);
 			}
 		}
 		let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
@@ -7147,25 +7149,7 @@ where
 					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
-					let per_peer_state = self.per_peer_state.read().unwrap();
-					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-						let peer_state = &mut *peer_state_lock;
-						if let Some(chan) = peer_state.channel_by_id
-							.get_mut(&channel_id)
-							.and_then(Channel::as_funded_mut)
-						{
-							if chan.blocked_monitor_updates_pending() == 0 {
-								handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
-							}
-						} else {
-							let update_actions = peer_state.monitor_update_blocked_actions
-								.remove(&channel_id).unwrap_or(Vec::new());
-							mem::drop(peer_state_lock);
-							mem::drop(per_peer_state);
-							self.handle_monitor_update_completion_actions(update_actions);
-						}
-					}
+					self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
 				},
 			}
 		}
@@ -8641,7 +8625,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	}
 
 	#[rustfmt::skip]
-	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
+	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: Option<u64>, counterparty_node_id: &PublicKey) {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
 
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -8651,16 +8635,35 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
 		let remaining_in_flight =
 			if let Some((_, pending)) = peer_state.in_flight_monitor_updates.get_mut(channel_id) {
-				pending.retain(|upd| upd.update_id > highest_applied_update_id);
+				if let Some(highest_applied_update_id) = highest_applied_update_id {
+					pending.retain(|upd| upd.update_id > highest_applied_update_id);
+					log_trace!(
+						logger,
+						"ChannelMonitor updated to {highest_applied_update_id}. {} pending in-flight updates.",
+						pending.len()
+					);
+				} else {
+					if let Some(update) = pending.get(0) {
+						log_trace!(
+							logger,
+							"ChannelMonitor updated to {}. {} pending in-flight updates.",
+							update.update_id - 1,
+							pending.len()
+						);
+					} else {
+						log_trace!(
+							logger,
+							"ChannelMonitor updated. {} pending in-flight updates.",
+							pending.len()
+						);
+					}
+				}
 				pending.len()
 			} else { 0 };
 
-		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
-		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
-			highest_applied_update_id, remaining_in_flight);
-
 		if remaining_in_flight != 0 {
 			return;
 		}
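
The `Option<u64>` parameter introduced above changes how the in-flight queue is handled: `Some(id)` prunes every queued update whose `update_id` is at or below `id`, while `None` prunes nothing and only logs, inferring the last applied id (when possible) as the first pending id minus one. A rough standalone model of that behaviour, with a plain `Vec<u64>` of update ids standing in for the real `ChannelMonitorUpdate` queue, could be:

```rust
/// Simplified model of the pruning above: `pending` holds the update_ids of
/// in-flight ChannelMonitorUpdates, oldest first.
fn prune_in_flight(pending: &mut Vec<u64>, highest_applied_update_id: Option<u64>) -> usize {
    if let Some(highest_applied) = highest_applied_update_id {
        // A `Completed` monitor event tells us everything up to this id persisted.
        pending.retain(|&update_id| update_id > highest_applied);
        println!(
            "ChannelMonitor updated to {highest_applied}. {} pending in-flight updates.",
            pending.len()
        );
    } else {
        // No explicit id: nothing is pruned, we only report the current state.
        match pending.first() {
            Some(&next) => println!(
                "ChannelMonitor updated to {}. {} pending in-flight updates.",
                next - 1,
                pending.len()
            ),
            None => println!("ChannelMonitor updated. {} pending in-flight updates.", pending.len()),
        }
    }
    pending.len()
}

fn main() {
    let mut pending = vec![5, 6, 7];
    assert_eq!(prune_in_flight(&mut pending, Some(6)), 1); // only update 7 remains
    assert_eq!(prune_in_flight(&mut pending, None), 1);    // nothing pruned
}
```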
@@ -10891,7 +10894,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
 					self.channel_monitor_updated(
 						&channel_id,
-						monitor_update_id,
+						Some(monitor_update_id),
 						&counterparty_node_id,
 					);
 				},
@@ -13127,38 +13130,7 @@ where
 
 		#[cfg(splicing)]
 		for (counterparty_node_id, channel_id) in to_process_monitor_update_actions {
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state = &mut *peer_state_lock;
-				let has_in_flight_updates = peer_state
-					.in_flight_monitor_updates
-					.get(&channel_id)
-					.map(|in_flight_updates| !in_flight_updates.1.is_empty())
-					.unwrap_or(false);
-				if let Some(chan) = peer_state.channel_by_id
-					.get_mut(&channel_id)
-					.and_then(Channel::as_funded_mut)
-				{
-					if !has_in_flight_updates && chan.blocked_monitor_updates_pending() == 0 {
-						handle_monitor_update_completion!(
-							self,
-							peer_state_lock,
-							peer_state,
-							per_peer_state,
-							chan
-						);
-					}
-				} else {
-					let update_actions = peer_state
-						.monitor_update_blocked_actions
-						.remove(&channel_id)
-						.unwrap_or(Vec::new());
-					mem::drop(peer_state_lock);
-					mem::drop(per_peer_state);
-					self.handle_monitor_update_completion_actions(update_actions);
-				}
-			}
+			self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
 		}
 
 		if let Some(height) = height_opt {