@@ -7702,24 +7702,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		(htlc_forwards, decode_update_add_htlcs)
 	}

-	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock

-		let counterparty_node_id = match counterparty_node_id {
-			Some(cp_id) => cp_id.clone(),
-			None => {
-				// TODO: Once we can rely on the counterparty_node_id from the
-				// monitor event, this and the outpoint_to_peer map should be removed.
-				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-				match outpoint_to_peer.get(funding_txo) {
-					Some(cp_id) => cp_id.clone(),
-					None => return,
-				}
-			}
-		};
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let mut peer_state_lock;
-		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 		if peer_state_mutex_opt.is_none() { return }
 		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
@@ -7730,7 +7718,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				pending.len()
 			} else { 0 };

-		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
 		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
 			highest_applied_update_id, remaining_in_flight);

@@ -9482,67 +9470,56 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			for monitor_event in monitor_events.drain(..) {
 				match monitor_event {
 					MonitorEvent::HTLCEvent(htlc_update) => {
-						let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
+						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
 						if let Some(preimage) = htlc_update.payment_preimage {
 							log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
 							self.claim_funds_internal(htlc_update.source, preimage,
 								htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
-								false, counterparty_node_id, funding_outpoint, channel_id, None);
+								false, Some(counterparty_node_id), funding_outpoint, channel_id, None);
 						} else {
 							log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
-							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
+							let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 							let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
 						}
 					},
 					MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
-						let counterparty_node_id_opt = match counterparty_node_id {
-							Some(cp_id) => Some(cp_id),
-							None => {
-								// TODO: Once we can rely on the counterparty_node_id from the
-								// monitor event, this and the outpoint_to_peer map should be removed.
-								let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-								outpoint_to_peer.get(&funding_outpoint).cloned()
-							}
-						};
-						if let Some(counterparty_node_id) = counterparty_node_id_opt {
-							let per_peer_state = self.per_peer_state.read().unwrap();
-							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-								let peer_state = &mut *peer_state_lock;
-								let pending_msg_events = &mut peer_state.pending_msg_events;
-								if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
-									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
-										reason
-									} else {
-										ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
-									};
-									let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
-									let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
-									failed_channels.push(shutdown_res);
-									if let Some(funded_chan) = chan.as_funded() {
-										if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
-											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
-											pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
-												msg: update
-											});
-										}
-										pending_msg_events.push(MessageSendEvent::HandleError {
-											node_id: funded_chan.context.get_counterparty_node_id(),
-											action: msgs::ErrorAction::DisconnectPeer {
-												msg: Some(msgs::ErrorMessage {
-													channel_id: funded_chan.context.channel_id(),
-													data: reason.to_string()
-												})
-											},
+						let per_peer_state = self.per_peer_state.read().unwrap();
+						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+							let peer_state = &mut *peer_state_lock;
+							let pending_msg_events = &mut peer_state.pending_msg_events;
+							if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
+								let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+									reason
+								} else {
+									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
+								};
+								let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
+								let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
+								failed_channels.push(shutdown_res);
+								if let Some(funded_chan) = chan.as_funded() {
+									if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
+										let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+										pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
+											msg: update
 										});
 									}
+									pending_msg_events.push(MessageSendEvent::HandleError {
+										node_id: counterparty_node_id,
+										action: msgs::ErrorAction::DisconnectPeer {
+											msg: Some(msgs::ErrorMessage {
+												channel_id: funded_chan.context.channel_id(),
+												data: reason.to_string()
+											})
+										},
+									});
 								}
 							}
 						}
 					},
 					MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
-						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
+						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, &counterparty_node_id);
 					},
 				}
 			}
@@ -13772,26 +13749,26 @@ where
 		for (channel_id, monitor) in args.channel_monitors.iter() {
 			if !channel_id_set.contains(channel_id) {
 				let mut should_queue_fc_update = false;
-				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
-					// If the ChannelMonitor had any updates, we may need to update it further and
-					// thus track it in `closed_channel_monitor_update_ids`. If the channel never
-					// had any updates at all, there can't be any HTLCs pending which we need to
-					// claim.
-					// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
-					// provide it with a closure update its `update_id` will be at 1.
-					if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
-						should_queue_fc_update = !monitor.no_further_updates_allowed();
-						let mut latest_update_id = monitor.get_latest_update_id();
-						if should_queue_fc_update {
-							latest_update_id += 1;
-						}
-						per_peer_state.entry(counterparty_node_id)
-							.or_insert_with(|| Mutex::new(empty_peer_state()))
-							.lock().unwrap()
-							.closed_channel_monitor_update_ids.entry(monitor.channel_id())
-							.and_modify(|v| *v = cmp::max(latest_update_id, *v))
-							.or_insert(latest_update_id);
+				let counterparty_node_id = monitor.get_counterparty_node_id();
+
+				// If the ChannelMonitor had any updates, we may need to update it further and
+				// thus track it in `closed_channel_monitor_update_ids`. If the channel never
+				// had any updates at all, there can't be any HTLCs pending which we need to
+				// claim.
+				// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
+				// provide it with a closure update its `update_id` will be at 1.
+				if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
+					should_queue_fc_update = !monitor.no_further_updates_allowed();
+					let mut latest_update_id = monitor.get_latest_update_id();
+					if should_queue_fc_update {
+						latest_update_id += 1;
					}
+					per_peer_state.entry(counterparty_node_id)
+						.or_insert_with(|| Mutex::new(empty_peer_state()))
+						.lock().unwrap()
+						.closed_channel_monitor_update_ids.entry(monitor.channel_id())
+						.and_modify(|v| *v = cmp::max(latest_update_id, *v))
+						.or_insert(latest_update_id);
 				}

 				if !should_queue_fc_update {
@@ -13802,31 +13779,20 @@ where
 				let channel_id = monitor.channel_id();
 				log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
 					&channel_id);
-				let mut monitor_update = ChannelMonitorUpdate {
+				let monitor_update = ChannelMonitorUpdate {
 					update_id: monitor.get_latest_update_id().saturating_add(1),
-					counterparty_node_id: None,
+					counterparty_node_id: Some(counterparty_node_id),
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 					channel_id: Some(monitor.channel_id()),
 				};
 				let funding_txo = monitor.get_funding_txo();
-				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
-					let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-						counterparty_node_id,
-						funding_txo,
-						channel_id,
-						update: monitor_update,
-					};
-					close_background_events.push(update);
-				} else {
-					// This is a fairly old `ChannelMonitor` that hasn't seen an update to its
-					// off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
-					// `ChannelMonitorUpdate` will set the counterparty ID).
-					// Thus, we assume that it has no pending HTLCs and we will not need to
-					// generate a `ChannelMonitorUpdate` for it aside from this
-					// `ChannelForceClosed` one.
-					monitor_update.update_id = u64::MAX;
-					close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
-				}
+				let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+					counterparty_node_id,
+					funding_txo,
+					channel_id,
+					update: monitor_update,
+				};
+				close_background_events.push(update);
 			}
 		}

@@ -14385,7 +14351,7 @@ where
							// downstream chan is closed (because we don't have a
							// channel_id -> peer map entry).
							counterparty_opt.is_none(),
-							counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
+							Some(monitor.get_counterparty_node_id()),
							monitor.get_funding_txo(), monitor.channel_id()))
						} else { None }
					} else {
@@ -15070,8 +15036,8 @@ mod tests {
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

 		create_announced_chan_between_nodes(&nodes, 0, 1);
-
-		// Since we do not send peer storage, we manually simulate receiving a dummy
+
+		// Since we do not send peer storage, we manually simulate receiving a dummy
 		// `PeerStorage` from the channel partner.
 		nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage{data: vec![0; 100]});
