Commit 601bf4b

Require counterparty_node_id TLV for ChannelMonitor
New `ChannelMonitor`s created on v0.0.110 or later already have this field set, and those created earlier will have it set once a `ChannelMonitorUpdate` created on v0.0.116 or later has been applied. It would be extremely rare for a user to fall outside both of these conditions: they would have had to open the channel almost 3 years ago and have had no activity on it in the last 2 years. Nonetheless, a panic has been added on `ChannelMonitor` deserialization so that such users can still move forward: they must first run a v0.1.* release and send/route a payment over the channel (or close it) before upgrading to v0.2.0.
1 parent c4d0560 commit 601bf4b
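
For users who want to check whether they are affected before upgrading, the sketch below illustrates the migration path. It is a hypothetical helper, not part of LDK: it assumes a v0.1.* deployment, where `ChannelMonitor::get_counterparty_node_id()` still returns an `Option<PublicKey>` (see the diff below), and the function name and import paths are illustrative.

// Hypothetical pre-upgrade check for a v0.1.* node: list channels whose
// monitors would hit the new deserialization panic on v0.2.0. Send/route
// a payment over the flagged channels (or close them) before upgrading,
// so a ChannelMonitorUpdate records the counterparty node id.
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::ln::types::ChannelId;
use lightning::sign::ecdsa::EcdsaChannelSigner;

fn find_monitors_needing_activity<S: EcdsaChannelSigner>(
	monitors: &[ChannelMonitor<S>],
) -> Vec<ChannelId> {
	monitors
		.iter()
		// On v0.1.*, this getter still returns Option<PublicKey>.
		.filter(|monitor| monitor.get_counterparty_node_id().is_none())
		.map(|monitor| monitor.channel_id())
		.collect()
}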

7 files changed: 94 additions & 129 deletions

fuzz/src/chanmon_consistency.rs

Lines changed: 1 addition & 1 deletion

@@ -282,7 +282,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {

 	fn release_pending_monitor_events(
 		&self,
-	) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
+	) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
 		return self.chain_monitor.release_pending_monitor_events();
 	}
 }

lightning/src/chain/chainmonitor.rs

Lines changed: 2 additions & 2 deletions

@@ -246,7 +246,7 @@ pub struct ChainMonitor<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F
 	persister: P,
 	/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
 	/// from the user and not from a [`ChannelMonitor`].
-	pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
+	pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)>>,
 	/// The best block height seen, used as a proxy for the passage of time.
 	highest_chain_height: AtomicUsize,

@@ -874,7 +874,7 @@ where C::Target: chain::Filter,
 		}
 	}

-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
 		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
 			let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();

lightning/src/chain/channelmonitor.rs

Lines changed: 18 additions & 24 deletions

@@ -1020,7 +1020,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
 	best_block: BestBlock,

 	/// The node_id of our counterparty
-	counterparty_node_id: Option<PublicKey>,
+	counterparty_node_id: PublicKey,

 	/// Initial counterparty commmitment data needed to recreate the commitment tx
 	/// in the persistence pipeline for third-party watchtowers. This will only be present on
@@ -1242,7 +1242,7 @@ impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
 			(3, self.htlcs_resolved_on_chain, required_vec),
 			(5, pending_monitor_events, required_vec),
 			(7, self.funding_spend_seen, required),
-			(9, self.counterparty_node_id, option),
+			(9, self.counterparty_node_id, required),
 			(11, self.confirmed_commitment_tx_counterparty_output, option),
 			(13, self.spendable_txids_confirmed, required_vec),
 			(15, self.counterparty_fulfilled_htlcs, required),
@@ -1338,7 +1338,7 @@ impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
 	}

 	pub(crate) fn from_impl<S: EcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>, payment_hash: Option<PaymentHash>) -> Self {
-		let peer_id = monitor_impl.counterparty_node_id;
+		let peer_id = Some(monitor_impl.counterparty_node_id);
 		let channel_id = Some(monitor_impl.channel_id());
 		WithChannelMonitor {
 			logger, peer_id, channel_id, payment_hash,
@@ -1462,7 +1462,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 			spendable_txids_confirmed: Vec::new(),

 			best_block,
-			counterparty_node_id: Some(counterparty_node_id),
+			counterparty_node_id: counterparty_node_id,
 			initial_counterparty_commitment_info: None,
 			balances_empty_height: None,

@@ -1788,10 +1788,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 	}

 	/// Gets the `node_id` of the counterparty for this channel.
-	///
-	/// Will be `None` for channels constructed on LDK versions prior to 0.0.110 and always `Some`
-	/// otherwise.
-	pub fn get_counterparty_node_id(&self) -> Option<PublicKey> {
+	pub fn get_counterparty_node_id(&self) -> PublicKey {
 		self.inner.lock().unwrap().counterparty_node_id
 	}

@@ -3200,12 +3197,8 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 				log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
 		}

-		if updates.counterparty_node_id.is_some() {
-			if self.counterparty_node_id.is_none() {
-				self.counterparty_node_id = updates.counterparty_node_id;
-			} else {
-				debug_assert_eq!(self.counterparty_node_id, updates.counterparty_node_id);
-			}
+		if let Some(counterparty_node_id) = &updates.counterparty_node_id {
+			debug_assert_eq!(self.counterparty_node_id, *counterparty_node_id);
 		}

 		// ChannelMonitor updates may be applied after force close if we receive a preimage for a
@@ -3376,10 +3369,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 				package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_output_idx,
 			} => {
 				let channel_id = self.channel_id;
-				// unwrap safety: `ClaimEvent`s are only available for Anchor channels,
-				// introduced with v0.0.116. counterparty_node_id is guaranteed to be `Some`
-				// since v0.0.110.
-				let counterparty_node_id = self.counterparty_node_id.unwrap();
+				let counterparty_node_id = self.counterparty_node_id;
 				let commitment_txid = commitment_tx.compute_txid();
 				debug_assert_eq!(self.current_holder_commitment_tx.txid, commitment_txid);
 				let pending_htlcs = self.current_holder_commitment_tx.non_dust_htlcs();
@@ -3410,10 +3400,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 				target_feerate_sat_per_1000_weight, htlcs, tx_lock_time,
 			} => {
 				let channel_id = self.channel_id;
-				// unwrap safety: `ClaimEvent`s are only available for Anchor channels,
-				// introduced with v0.0.116. counterparty_node_id is guaranteed to be `Some`
-				// since v0.0.110.
-				let counterparty_node_id = self.counterparty_node_id.unwrap();
+				let counterparty_node_id = self.counterparty_node_id;
 				let mut htlc_descriptors = Vec::with_capacity(htlcs.len());
 				for htlc in htlcs {
 					htlc_descriptors.push(HTLCDescriptor {
@@ -5129,6 +5116,13 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 				chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point).to_p2wsh();
 		}

+		let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint));
+		if counterparty_node_id.is_none() {
+			panic!("Found monitor for channel {} with no updates since v0.0.118. \
+				These monitors are no longer supported. \
+				To continue, run a v0.1 release, send/route a payment over the channel or close it.", channel_id);
+		}
+
 		Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl {
 			latest_update_id,
 			commitment_transaction_number_obscure_factor,
@@ -5140,7 +5134,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP

 			channel_keys_id,
 			holder_revocation_basepoint,
-			channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)),
+			channel_id,
 			funding_info,
 			first_confirmed_funding_txo: first_confirmed_funding_txo.0.unwrap(),
 			current_counterparty_commitment_txid,
@@ -5184,7 +5178,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 			spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),

 			best_block,
-			counterparty_node_id,
+			counterparty_node_id: counterparty_node_id.unwrap(),
 			initial_counterparty_commitment_info,
 			balances_empty_height,
 			failed_back_htlc_ids: new_hash_set(),
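
Note the asymmetry above: the write side now serializes TLV type 9 as `required`, while the read side still deserializes it as an `Option` and only afterwards enforces presence. That ordering is what allows an actionable panic message instead of a generic decode error. Below is a minimal, self-contained sketch of the same pattern, using hypothetical standalone types rather than LDK's actual TLV macros:

// Sketch of the write-required/read-optional upgrade pattern (hypothetical
// types; LDK implements this via its TLV stream macros).
struct Monitor {
	counterparty_node_id: [u8; 33], // infallibly present after construction
}

fn read_monitor(legacy_field: Option<[u8; 33]>, channel_id: &str) -> Monitor {
	// Read the field optionally so a legacy serialization can be detected
	// and reported with migration instructions rather than a decode error.
	let counterparty_node_id = legacy_field.unwrap_or_else(|| {
		panic!(
			"Found monitor for channel {} with no updates since v0.0.118. \
			 These monitors are no longer supported. To continue, run a v0.1 \
			 release, send/route a payment over the channel or close it.",
			channel_id
		)
	});
	Monitor { counterparty_node_id }
}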

lightning/src/chain/mod.rs

Lines changed: 1 addition & 1 deletion

@@ -304,7 +304,7 @@ pub trait Watch<ChannelSigner: EcdsaChannelSigner> {
 	///
 	/// For details on asynchronous [`ChannelMonitor`] updating and returning
 	/// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>;
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)>;
 }

 /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to

lightning/src/ln/channelmanager.rs

Lines changed: 66 additions & 100 deletions

@@ -7702,24 +7702,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		(htlc_forwards, decode_update_add_htlcs)
 	}

-	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock

-		let counterparty_node_id = match counterparty_node_id {
-			Some(cp_id) => cp_id.clone(),
-			None => {
-				// TODO: Once we can rely on the counterparty_node_id from the
-				// monitor event, this and the outpoint_to_peer map should be removed.
-				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-				match outpoint_to_peer.get(funding_txo) {
-					Some(cp_id) => cp_id.clone(),
-					None => return,
-				}
-			}
-		};
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let mut peer_state_lock;
-		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 		if peer_state_mutex_opt.is_none() { return }
 		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
@@ -7730,7 +7718,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			pending.len()
 		} else { 0 };

-		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
 		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
 			highest_applied_update_id, remaining_in_flight);

@@ -9482,67 +9470,56 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			for monitor_event in monitor_events.drain(..) {
 				match monitor_event {
 					MonitorEvent::HTLCEvent(htlc_update) => {
-						let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
+						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
 						if let Some(preimage) = htlc_update.payment_preimage {
 							log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
 							self.claim_funds_internal(htlc_update.source, preimage,
 								htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
-								false, counterparty_node_id, funding_outpoint, channel_id, None);
+								false, Some(counterparty_node_id), funding_outpoint, channel_id, None);
 						} else {
 							log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
-							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
+							let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 							let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
 						}
 					},
 					MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
-						let counterparty_node_id_opt = match counterparty_node_id {
-							Some(cp_id) => Some(cp_id),
-							None => {
-								// TODO: Once we can rely on the counterparty_node_id from the
-								// monitor event, this and the outpoint_to_peer map should be removed.
-								let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-								outpoint_to_peer.get(&funding_outpoint).cloned()
-							}
-						};
-						if let Some(counterparty_node_id) = counterparty_node_id_opt {
-							let per_peer_state = self.per_peer_state.read().unwrap();
-							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-								let peer_state = &mut *peer_state_lock;
-								let pending_msg_events = &mut peer_state.pending_msg_events;
-								if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
-									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
-										reason
-									} else {
-										ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
-									};
-									let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
-									let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
-									failed_channels.push(shutdown_res);
-									if let Some(funded_chan) = chan.as_funded() {
-										if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
-											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
-											pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
-												msg: update
-											});
-										}
-										pending_msg_events.push(MessageSendEvent::HandleError {
-											node_id: funded_chan.context.get_counterparty_node_id(),
-											action: msgs::ErrorAction::DisconnectPeer {
-												msg: Some(msgs::ErrorMessage {
-													channel_id: funded_chan.context.channel_id(),
-													data: reason.to_string()
-												})
-											},
+						let per_peer_state = self.per_peer_state.read().unwrap();
+						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+							let peer_state = &mut *peer_state_lock;
+							let pending_msg_events = &mut peer_state.pending_msg_events;
+							if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
+								let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+									reason
+								} else {
+									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
+								};
+								let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
+								let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
+								failed_channels.push(shutdown_res);
+								if let Some(funded_chan) = chan.as_funded() {
+									if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
+										let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+										pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
+											msg: update
 										});
 									}
+									pending_msg_events.push(MessageSendEvent::HandleError {
+										node_id: counterparty_node_id,
+										action: msgs::ErrorAction::DisconnectPeer {
+											msg: Some(msgs::ErrorMessage {
+												channel_id: funded_chan.context.channel_id(),
+												data: reason.to_string()
+											})
+										},
+									});
 								}
 							}
 						}
 					},
 					MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
-						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
+						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, &counterparty_node_id);
					},
				}
			}
@@ -13772,26 +13749,26 @@ where
 		for (channel_id, monitor) in args.channel_monitors.iter() {
 			if !channel_id_set.contains(channel_id) {
 				let mut should_queue_fc_update = false;
-				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
-					// If the ChannelMonitor had any updates, we may need to update it further and
-					// thus track it in `closed_channel_monitor_update_ids`. If the channel never
-					// had any updates at all, there can't be any HTLCs pending which we need to
-					// claim.
-					// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
-					// provide it with a closure update its `update_id` will be at 1.
-					if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
-						should_queue_fc_update = !monitor.no_further_updates_allowed();
-						let mut latest_update_id = monitor.get_latest_update_id();
-						if should_queue_fc_update {
-							latest_update_id += 1;
-						}
-						per_peer_state.entry(counterparty_node_id)
-							.or_insert_with(|| Mutex::new(empty_peer_state()))
-							.lock().unwrap()
-							.closed_channel_monitor_update_ids.entry(monitor.channel_id())
-							.and_modify(|v| *v = cmp::max(latest_update_id, *v))
-							.or_insert(latest_update_id);
+				let counterparty_node_id = monitor.get_counterparty_node_id();
+
+				// If the ChannelMonitor had any updates, we may need to update it further and
+				// thus track it in `closed_channel_monitor_update_ids`. If the channel never
+				// had any updates at all, there can't be any HTLCs pending which we need to
+				// claim.
+				// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
+				// provide it with a closure update its `update_id` will be at 1.
+				if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
+					should_queue_fc_update = !monitor.no_further_updates_allowed();
+					let mut latest_update_id = monitor.get_latest_update_id();
+					if should_queue_fc_update {
+						latest_update_id += 1;
 					}
+					per_peer_state.entry(counterparty_node_id)
+						.or_insert_with(|| Mutex::new(empty_peer_state()))
+						.lock().unwrap()
+						.closed_channel_monitor_update_ids.entry(monitor.channel_id())
+						.and_modify(|v| *v = cmp::max(latest_update_id, *v))
+						.or_insert(latest_update_id);
 				}

 				if !should_queue_fc_update {
@@ -13802,31 +13779,20 @@
 				let channel_id = monitor.channel_id();
 				log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
 					&channel_id);
-				let mut monitor_update = ChannelMonitorUpdate {
+				let monitor_update = ChannelMonitorUpdate {
 					update_id: monitor.get_latest_update_id().saturating_add(1),
-					counterparty_node_id: None,
+					counterparty_node_id: Some(counterparty_node_id),
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 					channel_id: Some(monitor.channel_id()),
 				};
 				let funding_txo = monitor.get_funding_txo();
-				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
-					let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-						counterparty_node_id,
-						funding_txo,
-						channel_id,
-						update: monitor_update,
-					};
-					close_background_events.push(update);
-				} else {
-					// This is a fairly old `ChannelMonitor` that hasn't seen an update to its
-					// off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
-					// `ChannelMonitorUpdate` will set the counterparty ID).
-					// Thus, we assume that it has no pending HTLCs and we will not need to
-					// generate a `ChannelMonitorUpdate` for it aside from this
-					// `ChannelForceClosed` one.
-					monitor_update.update_id = u64::MAX;
-					close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
-				}
+				let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+					counterparty_node_id,
+					funding_txo,
+					channel_id,
+					update: monitor_update,
+				};
+				close_background_events.push(update);
 			}
 		}

@@ -14385,7 +14351,7 @@ where
 							// downstream chan is closed (because we don't have a
 							// channel_id -> peer map entry).
 							counterparty_opt.is_none(),
-							counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
+							Some(monitor.get_counterparty_node_id()),
 							monitor.get_funding_txo(), monitor.channel_id()))
 					} else { None }
 				} else {
@@ -15070,8 +15036,8 @@ mod tests {
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

 		create_announced_chan_between_nodes(&nodes, 0, 1);
-
-		// Since we do not send peer storage, we manually simulate receiving a dummy
+
+		// Since we do not send peer storage, we manually simulate receiving a dummy
 		// `PeerStorage` from the channel partner.
 		nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage{data: vec![0; 100]});

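
For downstream implementors of `chain::Watch` (like the fuzz harness above), the adaptation is mechanical. Below is a hedged sketch of the method body such an implementation might now use; it assumes the implementor holds a `Mutex`-guarded map of monitors, and the `monitors` field name is illustrative:

	// Illustrative `chain::Watch::release_pending_monitor_events` body after
	// this commit: the counterparty node id is returned unconditionally, so
	// callers no longer need an outpoint-to-peer fallback map.
	fn release_pending_monitor_events(
		&self,
	) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
		self.monitors // assumed: Mutex<HashMap<OutPoint, ChannelMonitor<...>>>
			.lock()
			.unwrap()
			.iter()
			.map(|(outpoint, monitor)| {
				(
					*outpoint,
					monitor.channel_id(),
					monitor.get_and_clear_pending_monitor_events(),
					// Infallible as of this commit; previously the caller had
					// to handle `None` for pre-v0.0.110 monitors.
					monitor.get_counterparty_node_id(),
				)
			})
			.collect()
	}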
