
Commit 81ae784

Merge pull request lightningdevkit#4170 from TheBlueMatt/2025-10-less-monitor-update-macro
Extend lightningdevkit#4138 removing one more macro
2 parents: 0eec30a + faf7238

1 file changed: +94 −120 lines

lightning/src/ln/channelmanager.rs

@@ -3300,8 +3300,9 @@ macro_rules! locked_close_channel {
 	}};
 	($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{
 		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
-			handle_new_monitor_update_actions_deferred!($self, funding_txo, update, $peer_state,
-				$funded_chan.context);
+			handle_new_monitor_update_locked_actions_handled_by_caller!(
+				$self, funding_txo, update, $peer_state, $funded_chan.context
+			);
 		}
 		// If there's a possibility that we need to generate further monitor updates for this
 		// channel, we need to store the last update_id of it. However, we don't want to insert
@@ -3707,40 +3708,82 @@ macro_rules! handle_initial_monitor {
 	};
 }
 
+fn handle_new_monitor_update_internal<CM: AChannelManager, LG: Logger>(
+	cm: &CM,
+	in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
+	channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey,
+	new_update: ChannelMonitorUpdate, logger: LG,
+) -> (bool, bool) {
+	let in_flight_updates = &mut in_flight_monitor_updates
+		.entry(channel_id)
+		.or_insert_with(|| (funding_txo, Vec::new()))
+		.1;
+	// During startup, we push monitor updates as background events through to here in
+	// order to replay updates that were in-flight when we shut down. Thus, we have to
+	// filter for uniqueness here.
+	let update_idx =
+		in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| {
+			in_flight_updates.push(new_update);
+			in_flight_updates.len() - 1
+		});
+
+	if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) {
+		let update_res =
+			cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]);
+		let update_completed = handle_monitor_update_res(cm, update_res, channel_id, logger);
+		if update_completed {
+			let _ = in_flight_updates.remove(update_idx);
+		}
+		(update_completed, update_completed && in_flight_updates.is_empty())
+	} else {
+		// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
+		// fail to persist it. This is a fairly safe assumption, however, since anything we do
+		// during the startup sequence should be replayed exactly if we immediately crash.
+		let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+			counterparty_node_id,
+			funding_txo,
+			channel_id,
+			update: in_flight_updates[update_idx].clone(),
+		};
+		// We want to track the in-flight update both in `in_flight_monitor_updates` and in
+		// `pending_background_events` to avoid a race condition during
+		// `pending_background_events` processing where we complete one
+		// `ChannelMonitorUpdate` (but there are more pending as background events) but we
+		// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
+		// run post-completion actions.
+		// We could work around that with some effort, but its simpler to just track updates
+		// twice.
+		cm.get_cm().pending_background_events.lock().unwrap().push(event);
+		(false, false)
+	}
+}
+
 macro_rules! handle_post_close_monitor_update {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
 		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr
 	) => {{
-		let logger =
-			WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
-		let in_flight_updates;
-		let idx;
-		handle_new_monitor_update_internal!(
+		let (update_completed, all_updates_complete) = handle_new_monitor_update_internal(
 			$self,
-			$funding_txo,
-			$update,
-			$peer_state,
-			logger,
+			&mut $peer_state.in_flight_monitor_updates,
 			$channel_id,
+			$funding_txo,
 			$counterparty_node_id,
-			in_flight_updates,
-			idx,
-			{
-				let _ = in_flight_updates.remove(idx);
-				if in_flight_updates.is_empty() {
-					let update_actions = $peer_state
-						.monitor_update_blocked_actions
-						.remove(&$channel_id)
-						.unwrap_or(Vec::new());
+			$update,
+			WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None),
+		);
+		if all_updates_complete {
+			let update_actions = $peer_state
+				.monitor_update_blocked_actions
+				.remove(&$channel_id)
+				.unwrap_or(Vec::new());
 
-					mem::drop($peer_state_lock);
-					mem::drop($per_peer_state_lock);
+			mem::drop($peer_state_lock);
+			mem::drop($per_peer_state_lock);
 
-					$self.handle_monitor_update_completion_actions(update_actions);
-				}
-			}
-		)
+			$self.handle_monitor_update_completion_actions(update_actions);
+		}
+		update_completed
 	}};
 }
 
@@ -3753,80 +3796,20 @@ macro_rules! handle_post_close_monitor_update {
 /// drop the aforementioned peer state locks at a given callsite. In this situation, use this macro
 /// to apply the monitor update immediately and handle the monitor update completion actions at a
 /// later time.
-macro_rules! handle_new_monitor_update_actions_deferred {
+macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr
 	) => {{
-		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
-		let chan_id = $chan_context.channel_id();
-		let counterparty_node_id = $chan_context.get_counterparty_node_id();
-		let in_flight_updates;
-		let idx;
-		handle_new_monitor_update_internal!(
+		let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal(
 			$self,
+			&mut $peer_state.in_flight_monitor_updates,
+			$chan_context.channel_id(),
 			$funding_txo,
+			$chan_context.get_counterparty_node_id(),
 			$update,
-			$peer_state,
-			logger,
-			chan_id,
-			counterparty_node_id,
-			in_flight_updates,
-			idx,
-			{
-				let _ = in_flight_updates.remove(idx);
-			}
-		)
-	}};
-}
-
-macro_rules! handle_new_monitor_update_internal {
-	(
-		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
-		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
-		$completed: expr
-	) => {{
-		$in_flight_updates = &mut $peer_state
-			.in_flight_monitor_updates
-			.entry($chan_id)
-			.or_insert_with(|| ($funding_txo, Vec::new()))
-			.1;
-		// During startup, we push monitor updates as background events through to here in
-		// order to replay updates that were in-flight when we shut down. Thus, we have to
-		// filter for uniqueness here.
-		$update_idx =
-			$in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| {
-				$in_flight_updates.push($update);
-				$in_flight_updates.len() - 1
-			});
-		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
-			let update_res =
-				$self.chain_monitor.update_channel($chan_id, &$in_flight_updates[$update_idx]);
-			let update_completed = handle_monitor_update_res($self, update_res, $chan_id, $logger);
-			if update_completed {
-				$completed;
-			}
-			update_completed
-		} else {
-			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
-			// fail to persist it. This is a fairly safe assumption, however, since anything we do
-			// during the startup sequence should be replayed exactly if we immediately crash.
-			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-				counterparty_node_id: $counterparty_node_id,
-				funding_txo: $funding_txo,
-				channel_id: $chan_id,
-				update: $in_flight_updates[$update_idx].clone(),
-			};
-			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
-			// `pending_background_events` to avoid a race condition during
-			// `pending_background_events` processing where we complete one
-			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
-			// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
-			// run post-completion actions.
-			// We could work around that with some effort, but its simpler to just track updates
-			// twice.
-			$self.pending_background_events.lock().unwrap().push(event);
-			false
-		}
+			WithChannelContext::from(&$self.logger, &$chan_context, None),
+		);
+		update_completed
 	}};
 }
 
@@ -3835,34 +3818,25 @@ macro_rules! handle_new_monitor_update {
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
 		$per_peer_state_lock: expr, $chan: expr
 	) => {{
-		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
-		let chan_id = $chan.context.channel_id();
-		let counterparty_node_id = $chan.context.get_counterparty_node_id();
-		let in_flight_updates;
-		let idx;
-		handle_new_monitor_update_internal!(
+		let (update_completed, all_updates_complete) = handle_new_monitor_update_internal(
 			$self,
+			&mut $peer_state.in_flight_monitor_updates,
+			$chan.context.channel_id(),
 			$funding_txo,
+			$chan.context.get_counterparty_node_id(),
 			$update,
-			$peer_state,
-			logger,
-			chan_id,
-			counterparty_node_id,
-			in_flight_updates,
-			idx,
-			{
-				let _ = in_flight_updates.remove(idx);
-				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
-					handle_monitor_update_completion!(
-						$self,
-						$peer_state_lock,
-						$peer_state,
-						$per_peer_state_lock,
-						$chan
-					);
-				}
-			}
-		)
+			WithChannelContext::from(&$self.logger, &$chan.context, None),
+		);
+		if all_updates_complete && $chan.blocked_monitor_updates_pending() == 0 {
+			handle_monitor_update_completion!(
+				$self,
+				$peer_state_lock,
+				$peer_state,
+				$per_peer_state_lock,
+				$chan
+			);
+		}
+		update_completed
 	}};
 }
 
@@ -14381,7 +14355,7 @@ where
 			insert_short_channel_id!(short_to_chan_info, funded_channel);
 
 			if let Some(monitor_update) = monitor_update_opt {
-				handle_new_monitor_update_actions_deferred!(
+				handle_new_monitor_update_locked_actions_handled_by_caller!(
 					self,
 					funding_txo,
 					monitor_update,
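
At its core, the change swaps a declarative macro that took a $completed: expr callback for a plain generic function returning a pair of flags, (update_completed, all_updates_complete), so that each caller runs its own lock-sensitive completion logic. The shape of that refactor is easy to see in a minimal standalone sketch; apply_update, Update, and the persist closure below are simplified stand-ins for illustration, not LDK APIs:

use std::collections::BTreeMap;

// Illustrative stand-ins; the real code uses ChannelId, OutPoint and
// ChannelMonitorUpdate from LDK.
type ChannelId = u64;
#[derive(PartialEq)]
struct Update(u32);

// Track `new_update` as in-flight for `chan`, try to apply it via `persist`,
// and report (this_update_completed, all_in_flight_updates_completed).
// Like the patched function, it first filters for uniqueness so that an
// update replayed at startup is not tracked twice.
fn apply_update(
	in_flight: &mut BTreeMap<ChannelId, Vec<Update>>, chan: ChannelId, new_update: Update,
	persist: impl Fn(&Update) -> bool,
) -> (bool, bool) {
	let updates = in_flight.entry(chan).or_default();
	let idx = updates.iter().position(|u| u == &new_update).unwrap_or_else(|| {
		updates.push(new_update);
		updates.len() - 1
	});
	let completed = persist(&updates[idx]);
	if completed {
		let _ = updates.remove(idx);
	}
	(completed, completed && updates.is_empty())
}

fn main() {
	let mut in_flight = BTreeMap::new();
	// Callers branch on the returned flags themselves rather than passing a
	// completion callback into a macro.
	let (done, all_done) = apply_update(&mut in_flight, 42, Update(1), |_| true);
	assert!(done && all_done);
}

Returning both flags lets handle_post_close_monitor_update! and handle_new_monitor_update! run their completion actions inline, while handle_new_monitor_update_locked_actions_handled_by_caller! simply ignores the second flag, which is what allowed the $completed: expr parameter, and the macro that carried it, to be removed.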
