Commit faf7238

Convert handle_new_monitor_update_internal to a function
Due to lifetime limitations, our `ChannelMonitorUpdate` handling logic mostly lives in macros. The bulk of the code, though, can easily be moved into an `fn`, which we do here, reducing the expanded size of the `lightning` crate from 256,061 lines to 253,536 lines.
1 parent f3478a0 commit faf7238
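As a rough illustration of the pattern the commit message describes, here is a minimal, self-contained sketch (not LDK code; `process_update`, `handle_update!`, and `persist_ok` are made-up names): the shared logic becomes a plain fn that returns a `(completed, all_complete)` pair, and a thin wrapper macro runs its completion action at the call site based on those flags, instead of receiving the action as a macro argument.

// Hypothetical sketch only: the shared logic lives in a function that reports
// what happened, rather than in a macro that expands its whole body (including
// a caller-supplied "all completed" block) at every call site.
fn process_update(in_flight: &mut Vec<u32>, new_update: u32, persist_ok: bool) -> (bool, bool) {
	// Deduplicate, mirroring the "filter for uniqueness" step in the real function.
	let idx = in_flight.iter().position(|u| *u == new_update).unwrap_or_else(|| {
		in_flight.push(new_update);
		in_flight.len() - 1
	});
	// `persist_ok` stands in for the persistence result; in LDK this would come
	// from the chain monitor, but here it is just a flag.
	if persist_ok {
		let _ = in_flight.remove(idx);
	}
	(persist_ok, persist_ok && in_flight.is_empty())
}

// The thin macro plays the role of the old `$all_completed` block: the
// completion action now runs at the call site, keyed off the returned flags.
macro_rules! handle_update {
	($in_flight: expr, $update: expr, $persist_ok: expr, $on_all_complete: expr) => {{
		let (completed, all_complete) = process_update($in_flight, $update, $persist_ok);
		if all_complete {
			$on_all_complete;
		}
		completed
	}};
}

fn main() {
	let mut in_flight: Vec<u32> = Vec::new();
	let done = handle_update!(&mut in_flight, 42, true, println!("all in-flight updates completed"));
	assert!(done);
	assert!(in_flight.is_empty());
}

Because the function body is no longer expanded at every call site, each macro expansion shrinks to a handful of lines, which is where the reduction in the crate's macro-expanded line count comes from.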

1 file changed: +87 -98 lines changed
lightning/src/ln/channelmanager.rs

Lines changed: 87 additions & 98 deletions
@@ -3696,86 +3696,82 @@ macro_rules! handle_initial_monitor {
 	};
 }
 
-macro_rules! handle_new_monitor_update_internal {
-	(
-		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
-		$chan_id: expr, $counterparty_node_id: expr, $all_completed: expr
-	) => {{
-		let in_flight_updates = &mut $peer_state
-			.in_flight_monitor_updates
-			.entry($chan_id)
-			.or_insert_with(|| ($funding_txo, Vec::new()))
-			.1;
-		// During startup, we push monitor updates as background events through to here in
-		// order to replay updates that were in-flight when we shut down. Thus, we have to
-		// filter for uniqueness here.
-		let update_idx =
-			in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| {
-				in_flight_updates.push($update);
-				in_flight_updates.len() - 1
-			});
-		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
-			let update_res =
-				$self.chain_monitor.update_channel($chan_id, &in_flight_updates[update_idx]);
-			let update_completed = handle_monitor_update_res($self, update_res, $chan_id, $logger);
-			if update_completed {
-				let _ = in_flight_updates.remove(update_idx);
-				if in_flight_updates.is_empty() {
-					$all_completed;
-				}
-			}
-			update_completed
-		} else {
-			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
-			// fail to persist it. This is a fairly safe assumption, however, since anything we do
-			// during the startup sequence should be replayed exactly if we immediately crash.
-			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-				counterparty_node_id: $counterparty_node_id,
-				funding_txo: $funding_txo,
-				channel_id: $chan_id,
-				update: in_flight_updates[update_idx].clone(),
-			};
-			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
-			// `pending_background_events` to avoid a race condition during
-			// `pending_background_events` processing where we complete one
-			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
-			// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
-			// run post-completion actions.
-			// We could work around that with some effort, but its simpler to just track updates
-			// twice.
-			$self.pending_background_events.lock().unwrap().push(event);
-			false
-		}
-	}};
+fn handle_new_monitor_update_internal<CM: AChannelManager, LG: Logger>(
+	cm: &CM,
+	in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
+	channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey,
+	new_update: ChannelMonitorUpdate, logger: LG,
+) -> (bool, bool) {
+	let in_flight_updates = &mut in_flight_monitor_updates
+		.entry(channel_id)
+		.or_insert_with(|| (funding_txo, Vec::new()))
+		.1;
+	// During startup, we push monitor updates as background events through to here in
+	// order to replay updates that were in-flight when we shut down. Thus, we have to
+	// filter for uniqueness here.
+	let update_idx =
+		in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| {
+			in_flight_updates.push(new_update);
+			in_flight_updates.len() - 1
+		});
+
+	if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) {
+		let update_res =
+			cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]);
+		let update_completed = handle_monitor_update_res(cm, update_res, channel_id, logger);
+		if update_completed {
+			let _ = in_flight_updates.remove(update_idx);
+		}
+		(update_completed, update_completed && in_flight_updates.is_empty())
+	} else {
+		// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
+		// fail to persist it. This is a fairly safe assumption, however, since anything we do
+		// during the startup sequence should be replayed exactly if we immediately crash.
+		let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+			counterparty_node_id,
+			funding_txo,
+			channel_id,
+			update: in_flight_updates[update_idx].clone(),
+		};
+		// We want to track the in-flight update both in `in_flight_monitor_updates` and in
+		// `pending_background_events` to avoid a race condition during
+		// `pending_background_events` processing where we complete one
+		// `ChannelMonitorUpdate` (but there are more pending as background events) but we
+		// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
+		// run post-completion actions.
+		// We could work around that with some effort, but its simpler to just track updates
+		// twice.
+		cm.get_cm().pending_background_events.lock().unwrap().push(event);
+		(false, false)
+	}
 }
 
 macro_rules! handle_post_close_monitor_update {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
 		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr
 	) => {{
-		let logger =
-			WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
-		handle_new_monitor_update_internal!(
+		let (update_completed, all_updates_complete) = handle_new_monitor_update_internal(
 			$self,
-			$funding_txo,
-			$update,
-			$peer_state,
-			logger,
+			&mut $peer_state.in_flight_monitor_updates,
 			$channel_id,
+			$funding_txo,
 			$counterparty_node_id,
-			{
-				let update_actions = $peer_state
-					.monitor_update_blocked_actions
-					.remove(&$channel_id)
-					.unwrap_or(Vec::new());
+			$update,
+			WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None),
+		);
+		if all_updates_complete {
+			let update_actions = $peer_state
+				.monitor_update_blocked_actions
+				.remove(&$channel_id)
+				.unwrap_or(Vec::new());
 
-				mem::drop($peer_state_lock);
-				mem::drop($per_peer_state_lock);
+			mem::drop($peer_state_lock);
+			mem::drop($per_peer_state_lock);
 
-				$self.handle_monitor_update_completion_actions(update_actions);
-			}
-		)
+			$self.handle_monitor_update_completion_actions(update_actions);
+		}
+		update_completed
 	}};
 }
 
@@ -3792,19 +3788,16 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr
 	) => {{
-		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
-		let chan_id = $chan_context.channel_id();
-		let counterparty_node_id = $chan_context.get_counterparty_node_id();
-		handle_new_monitor_update_internal!(
+		let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal(
 			$self,
+			&mut $peer_state.in_flight_monitor_updates,
+			$chan_context.channel_id(),
 			$funding_txo,
+			$chan_context.get_counterparty_node_id(),
 			$update,
-			$peer_state,
-			logger,
-			chan_id,
-			counterparty_node_id,
-			{}
-		)
+			WithChannelContext::from(&$self.logger, &$chan_context, None),
+		);
+		update_completed
 	}};
 }
 
@@ -3813,29 +3806,25 @@ macro_rules! handle_new_monitor_update {
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
 		$per_peer_state_lock: expr, $chan: expr
 	) => {{
-		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
-		let chan_id = $chan.context.channel_id();
-		let counterparty_node_id = $chan.context.get_counterparty_node_id();
-		handle_new_monitor_update_internal!(
+		let (update_completed, all_updates_complete) = handle_new_monitor_update_internal(
 			$self,
+			&mut $peer_state.in_flight_monitor_updates,
+			$chan.context.channel_id(),
 			$funding_txo,
+			$chan.context.get_counterparty_node_id(),
 			$update,
-			$peer_state,
-			logger,
-			chan_id,
-			counterparty_node_id,
-			{
-				if $chan.blocked_monitor_updates_pending() == 0 {
-					handle_monitor_update_completion!(
-						$self,
-						$peer_state_lock,
-						$peer_state,
-						$per_peer_state_lock,
-						$chan
-					);
-				}
-			}
-		)
+			WithChannelContext::from(&$self.logger, &$chan.context, None),
+		);
+		if all_updates_complete && $chan.blocked_monitor_updates_pending() == 0 {
+			handle_monitor_update_completion!(
+				$self,
+				$peer_state_lock,
+				$peer_state,
+				$per_peer_state_lock,
+				$chan
+			);
+		}
+		update_completed
 	}};
 }
 
