@@ -3355,8 +3355,22 @@ macro_rules! emit_initial_channel_ready_event {
 	};
 }
 
+/// Handles the completion steps for when a [`ChannelMonitorUpdate`] is applied to a live channel.
+///
+/// You should generally not add new direct calls to this; rather, rely on
+/// `handle_new_monitor_update` or [`ChannelManager::channel_monitor_updated`] to call it for you.
+///
+/// Requires that `$chan.blocked_monitor_updates_pending() == 0` and the in-flight monitor update
+/// set for this channel is empty!
 macro_rules! handle_monitor_update_completion {
 	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+		#[cfg(debug_assertions)]
+		{
+			let in_flight_updates =
+				$peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id());
+			assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
+			assert_eq!($chan.blocked_monitor_updates_pending(), 0);
+		}
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let mut updates = $chan.monitor_updating_restored(&&logger,
 			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
@@ -4271,19 +4285,7 @@ where
 			// TODO: If we do the `in_flight_monitor_updates.is_empty()` check in
 			// `locked_close_channel` we can skip the locks here.
 			if shutdown_res.channel_funding_txo.is_some() {
-				let per_peer_state = self.per_peer_state.read().unwrap();
-				if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
-					let mut peer_state = peer_state_mtx.lock().unwrap();
-					if peer_state.in_flight_monitor_updates.get(&shutdown_res.channel_id).map(|(_, updates)| updates.is_empty()).unwrap_or(true) {
-						let update_actions = peer_state.monitor_update_blocked_actions
-							.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());
-
-						mem::drop(peer_state);
-						mem::drop(per_peer_state);
-
-						self.handle_monitor_update_completion_actions(update_actions);
-					}
-				}
+				self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id);
 			}
 		}
 		let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
@@ -7160,25 +7162,7 @@ where
 					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
-					let per_peer_state = self.per_peer_state.read().unwrap();
-					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-						let peer_state = &mut *peer_state_lock;
-						if let Some(chan) = peer_state.channel_by_id
-							.get_mut(&channel_id)
-							.and_then(Channel::as_funded_mut)
-						{
-							if chan.blocked_monitor_updates_pending() == 0 {
-								handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
-							}
-						} else {
-							let update_actions = peer_state.monitor_update_blocked_actions
-								.remove(&channel_id).unwrap_or(Vec::new());
-							mem::drop(peer_state_lock);
-							mem::drop(per_peer_state);
-							self.handle_monitor_update_completion_actions(update_actions);
-						}
-					}
+					self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
 				},
 			}
 		}
@@ -8700,7 +8684,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	}

 	#[rustfmt::skip]
-	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
+	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: Option<u64>, counterparty_node_id: &PublicKey) {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock

 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -8710,16 +8694,33 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;

+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
 		let remaining_in_flight =
 			if let Some((_, pending)) = peer_state.in_flight_monitor_updates.get_mut(channel_id) {
-				pending.retain(|upd| upd.update_id > highest_applied_update_id);
+				if let Some(highest_applied_update_id) = highest_applied_update_id {
+					pending.retain(|upd| upd.update_id > highest_applied_update_id);
+					log_trace!(
+						logger,
+						"ChannelMonitor updated to {highest_applied_update_id}. {} pending in-flight updates.",
+						pending.len()
+					);
+				} else if let Some(update) = pending.get(0) {
+					log_trace!(
+						logger,
+						"ChannelMonitor updated to {}. {} pending in-flight updates.",
+						update.update_id - 1,
+						pending.len()
+					);
+				} else {
+					log_trace!(
+						logger,
+						"ChannelMonitor updated. {} pending in-flight updates.",
+						pending.len()
+					);
+				}
 				pending.len()
 			} else { 0 };

-		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
-		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
-			highest_applied_update_id, remaining_in_flight);
-
 		if remaining_in_flight != 0 {
 			return;
 		}
@@ -10927,7 +10928,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
 					self.channel_monitor_updated(
 						&channel_id,
-						monitor_update_id,
+						Some(monitor_update_id),
 						&counterparty_node_id,
 					);
 				},
@@ -13150,38 +13151,7 @@ where

 		#[cfg(splicing)]
 		for (counterparty_node_id, channel_id) in to_process_monitor_update_actions {
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state = &mut *peer_state_lock;
-				let has_in_flight_updates = peer_state
-					.in_flight_monitor_updates
-					.get(&channel_id)
-					.map(|in_flight_updates| !in_flight_updates.1.is_empty())
-					.unwrap_or(false);
-				if let Some(chan) = peer_state.channel_by_id
-					.get_mut(&channel_id)
-					.and_then(Channel::as_funded_mut)
-				{
-					if !has_in_flight_updates && chan.blocked_monitor_updates_pending() == 0 {
-						handle_monitor_update_completion!(
-							self,
-							peer_state_lock,
-							peer_state,
-							per_peer_state,
-							chan
-						);
-					}
-				} else {
-					let update_actions = peer_state
-						.monitor_update_blocked_actions
-						.remove(&channel_id)
-						.unwrap_or(Vec::new());
-					mem::drop(peer_state_lock);
-					mem::drop(per_peer_state);
-					self.handle_monitor_update_completion_actions(update_actions);
-				}
-			}
+			self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
 		}

 		if let Some(height) = height_opt {