
Commit 20e51fe

Merge pull request #3947 from TheBlueMatt/2025-07-simplify-mon-completion
Remove direct calls to `handle_monitor_update_completion!`
2 parents: 95342dc + 8b9b078

File tree: 1 file changed (+41, -71 lines)

lightning/src/ln/channelmanager.rs

Lines changed: 41 additions & 71 deletions
@@ -3355,8 +3355,22 @@ macro_rules! emit_initial_channel_ready_event {
 	};
 }
 
+/// Handles the completion steps for when a [`ChannelMonitorUpdate`] is applied to a live channel.
+///
+/// You should not add new direct calls to this, generally, rather rely on
+/// `handle_new_monitor_update` or [`ChannelManager::channel_monitor_updated`] to call it for you.
+///
+/// Requires that `$chan.blocked_monitor_updates_pending() == 0` and the in-flight monitor update
+/// set for this channel is empty!
 macro_rules! handle_monitor_update_completion {
 	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+		#[cfg(debug_assertions)]
+		{
+			let in_flight_updates =
+				$peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id());
+			assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
+			assert_eq!($chan.blocked_monitor_updates_pending(), 0);
+		}
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let mut updates = $chan.monitor_updating_restored(&&logger,
 			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
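
The doc comment and the new `#[cfg(debug_assertions)]` block encode the macro's precondition: completion handling may only run once the channel has no in-flight and no blocked monitor updates. A minimal, self-contained sketch of that debug-only guard pattern (the types below are hypothetical stand-ins, not LDK's):

    use std::collections::HashMap;

    // Hypothetical stand-ins for the peer/channel state the macro inspects.
    struct PeerState {
        // channel id -> (funding info placeholder, queued monitor updates)
        in_flight_monitor_updates: HashMap<u64, ((), Vec<u64>)>,
    }

    struct ChannelState {
        channel_id: u64,
        blocked_monitor_updates: usize,
    }

    fn handle_completion(peer_state: &PeerState, chan: &ChannelState) {
        // Debug-only check mirroring the precondition stated in the doc comment:
        // no in-flight updates recorded for this channel, and none blocked.
        #[cfg(debug_assertions)]
        {
            let in_flight = peer_state.in_flight_monitor_updates.get(&chan.channel_id);
            assert!(in_flight.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
            assert_eq!(chan.blocked_monitor_updates, 0);
        }
        let _ = (peer_state, chan); // silence unused warnings in release builds
        // ...completion steps would run here...
    }

    fn main() {
        let peer_state = PeerState { in_flight_monitor_updates: HashMap::new() };
        let chan = ChannelState { channel_id: 42, blocked_monitor_updates: 0 };
        handle_completion(&peer_state, &chan);
    }
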
@@ -4271,19 +4285,7 @@ where
 			// TODO: If we do the `in_flight_monitor_updates.is_empty()` check in
 			// `locked_close_channel` we can skip the locks here.
 			if shutdown_res.channel_funding_txo.is_some() {
-				let per_peer_state = self.per_peer_state.read().unwrap();
-				if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
-					let mut peer_state = peer_state_mtx.lock().unwrap();
-					if peer_state.in_flight_monitor_updates.get(&shutdown_res.channel_id).map(|(_, updates)| updates.is_empty()).unwrap_or(true) {
-						let update_actions = peer_state.monitor_update_blocked_actions
-							.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());
-
-						mem::drop(peer_state);
-						mem::drop(per_peer_state);
-
-						self.handle_monitor_update_completion_actions(update_actions);
-					}
-				}
+				self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id);
 			}
 		}
 		let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
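
The block removed here (and the similar ones below) had to drop both the peer-state mutex guard and the per-peer-state read lock before invoking the completion actions, because those actions may need to re-acquire the same locks; `channel_monitor_updated` now owns that dance. A generic sketch of the drop-before-callback pattern the old code followed, using plain `std::sync::Mutex` and illustrative types rather than LDK's:

    use std::sync::Mutex;

    struct PeerState {
        blocked_actions: Vec<String>,
    }

    fn run_actions(actions: Vec<String>) {
        // In the real code this step may take the same locks again, so the caller
        // must not be holding them when it is invoked.
        for action in actions {
            println!("running action: {action}");
        }
    }

    fn main() {
        let peer_state_mtx = Mutex::new(PeerState {
            blocked_actions: vec!["claim HTLC".into(), "send RAA".into()],
        });

        let mut peer_state = peer_state_mtx.lock().unwrap();
        let actions = std::mem::take(&mut peer_state.blocked_actions);

        // Release the guard before running the actions, mirroring the mem::drop
        // calls in the removed code.
        drop(peer_state);

        run_actions(actions);
    }
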
@@ -7160,25 +7162,7 @@ where
 				self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 			},
 			BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
-				let per_peer_state = self.per_peer_state.read().unwrap();
-				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-					let peer_state = &mut *peer_state_lock;
-					if let Some(chan) = peer_state.channel_by_id
-						.get_mut(&channel_id)
-						.and_then(Channel::as_funded_mut)
-					{
-						if chan.blocked_monitor_updates_pending() == 0 {
-							handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
-						}
-					} else {
-						let update_actions = peer_state.monitor_update_blocked_actions
-							.remove(&channel_id).unwrap_or(Vec::new());
-						mem::drop(peer_state_lock);
-						mem::drop(per_peer_state);
-						self.handle_monitor_update_completion_actions(update_actions);
-					}
-				}
+				self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
 			},
 		}
 	}
@@ -8700,7 +8684,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	}
 
 	#[rustfmt::skip]
-	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
+	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: Option<u64>, counterparty_node_id: &PublicKey) {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
 
 		let per_peer_state = self.per_peer_state.read().unwrap();
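
This signature change is the crux of the refactor: `channel_monitor_updated` now takes an `Option<u64>`, where `Some(id)` reports that a specific monitor update was applied and `None` simply asks the manager to re-check whether completion handling can run, with nothing new applied. A tiny stand-alone sketch of that convention, assuming pending update ids are tracked as a plain `Vec<u64>` rather than LDK's update structs:

    // Prune anything at or below the applied id (if one was given) and report how
    // many in-flight updates remain; completion can only proceed once this hits 0.
    fn monitor_updated(pending: &mut Vec<u64>, highest_applied_update_id: Option<u64>) -> usize {
        if let Some(id) = highest_applied_update_id {
            pending.retain(|&upd_id| upd_id > id);
        }
        pending.len()
    }

    fn main() {
        let mut pending = vec![3, 4, 5];
        assert_eq!(monitor_updated(&mut pending, Some(4)), 1); // 3 and 4 pruned, 5 remains
        assert_eq!(monitor_updated(&mut pending, None), 1);    // nothing pruned, just re-checked
        assert_eq!(monitor_updated(&mut pending, Some(5)), 0); // everything applied
    }
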
@@ -8710,16 +8694,33 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
 		let remaining_in_flight =
 			if let Some((_, pending)) = peer_state.in_flight_monitor_updates.get_mut(channel_id) {
-				pending.retain(|upd| upd.update_id > highest_applied_update_id);
+				if let Some(highest_applied_update_id) = highest_applied_update_id {
+					pending.retain(|upd| upd.update_id > highest_applied_update_id);
+					log_trace!(
+						logger,
+						"ChannelMonitor updated to {highest_applied_update_id}. {} pending in-flight updates.",
+						pending.len()
+					);
+				} else if let Some(update) = pending.get(0) {
+					log_trace!(
+						logger,
+						"ChannelMonitor updated to {}. {} pending in-flight updates.",
+						update.update_id - 1,
+						pending.len()
+					);
+				} else {
+					log_trace!(
+						logger,
+						"ChannelMonitor updated. {} pending in-flight updates.",
+						pending.len()
+					);
+				}
 				pending.len()
 			} else { 0 };
 
-		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
-		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
-			highest_applied_update_id, remaining_in_flight);
-
 		if remaining_in_flight != 0 {
 			return;
 		}
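
With the id now optional, the trace logging covers three cases: an explicit applied id, an id inferred from the lowest still-pending update (one less than it), and no id at all. A small self-contained sketch of that branching, again using a plain slice of update ids as a stand-in:

    fn describe_progress(pending: &[u64], highest_applied_update_id: Option<u64>) -> String {
        if let Some(id) = highest_applied_update_id {
            // The caller told us exactly which update id was applied.
            format!("ChannelMonitor updated to {id}. {} pending in-flight updates.", pending.len())
        } else if let Some(first) = pending.first() {
            // No id given, but everything before the lowest pending id must have applied.
            format!("ChannelMonitor updated to {}. {} pending in-flight updates.", first - 1, pending.len())
        } else {
            // Nothing pending and no id: all we can say is that the monitor is current.
            format!("ChannelMonitor updated. {} pending in-flight updates.", pending.len())
        }
    }

    fn main() {
        println!("{}", describe_progress(&[7, 8], Some(6)));
        println!("{}", describe_progress(&[7, 8], None));
        println!("{}", describe_progress(&[], None));
    }
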
@@ -10927,7 +10928,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
 					self.channel_monitor_updated(
 						&channel_id,
-						monitor_update_id,
+						Some(monitor_update_id),
 						&counterparty_node_id,
 					);
 				},
@@ -13150,38 +13151,7 @@ where
 
 		#[cfg(splicing)]
 		for (counterparty_node_id, channel_id) in to_process_monitor_update_actions {
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state = &mut *peer_state_lock;
-				let has_in_flight_updates = peer_state
-					.in_flight_monitor_updates
-					.get(&channel_id)
-					.map(|in_flight_updates| !in_flight_updates.1.is_empty())
-					.unwrap_or(false);
-				if let Some(chan) = peer_state.channel_by_id
-					.get_mut(&channel_id)
-					.and_then(Channel::as_funded_mut)
-				{
-					if !has_in_flight_updates && chan.blocked_monitor_updates_pending() == 0 {
-						handle_monitor_update_completion!(
-							self,
-							peer_state_lock,
-							peer_state,
-							per_peer_state,
-							chan
-						);
-					}
-				} else {
-					let update_actions = peer_state
-						.monitor_update_blocked_actions
-						.remove(&channel_id)
-						.unwrap_or(Vec::new());
-					mem::drop(peer_state_lock);
-					mem::drop(per_peer_state);
-					self.handle_monitor_update_completion_actions(update_actions);
-				}
-			}
+			self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
 		}
 
 		if let Some(height) = height_opt {
