@@ -1139,7 +1139,7 @@ where
// |
// |__`peer_state`
// |
- // |__`id_to_peer`
+ // |__`outpoint_to_peer`
// |
// |__`short_to_chan_info`
// |
@@ -1233,11 +1233,7 @@ where
/// See `ChannelManager` struct-level documentation for lock order requirements.
outbound_scid_aliases: Mutex<HashSet<u64>>,

- /// `channel_id` -> `counterparty_node_id`.
- ///
- /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
- /// multiple channels with the same `temporary_channel_id` to different peers can exist,
- /// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+ /// Channel funding outpoint -> `counterparty_node_id`.
///
/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
/// the corresponding channel for the event, as we only have access to the `channel_id` during
@@ -1255,7 +1251,7 @@ where
/// required to access the channel with the `counterparty_node_id`.
///
/// See `ChannelManager` struct-level documentation for lock order requirements.
- id_to_peer: Mutex<HashMap<ChannelId, PublicKey>>,
+ outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,

/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
///
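For orientation, here is a minimal standalone sketch of the new shape: `outpoint_to_peer` keys entries by the channel's funding outpoint rather than its `channel_id`, which is exactly what a `MonitorEvent` carries. The `OutPoint`, `PublicKey`, and `ManagerSketch` names below are simplified stand-ins for illustration, not LDK's real `bitcoin`/`lightning` types or the actual `ChannelManager` internals.

// Illustration only; simplified stand-ins for the real types in channelmanager.rs.
use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct OutPoint { txid: [u8; 32], index: u16 }

#[derive(Clone, Copy)]
struct PublicKey([u8; 33]);

struct ManagerSketch {
    // Channel funding outpoint -> counterparty node id, mirroring `outpoint_to_peer` above.
    outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
}

impl ManagerSketch {
    // A monitor event identifies its channel by funding outpoint, so the counterparty
    // can be resolved with a plain map lookup, as the MonitorEvent hunks below do.
    fn counterparty_for_monitor_event(&self, funding_txo: &OutPoint) -> Option<PublicKey> {
        self.outpoint_to_peer.lock().unwrap().get(funding_txo).copied()
    }
}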
@@ -1995,7 +1991,9 @@ macro_rules! handle_error {

macro_rules! update_maps_on_chan_removal {
($self: expr, $channel_context: expr) => {{
- $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id());
+ if let Some(outpoint) = $channel_context.get_funding_txo() {
+ $self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
+ }
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
if let Some(short_id) = $channel_context.get_short_channel_id() {
short_to_chan_info.remove(&short_id);
@@ -2414,7 +2412,7 @@ where
forward_htlcs: Mutex::new(HashMap::new()),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
pending_intercepted_htlcs: Mutex::new(HashMap::new()),
- id_to_peer: Mutex::new(HashMap::new()),
+ outpoint_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),

our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
@@ -2565,7 +2563,7 @@ where
fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
- // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+ // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
// unlikely as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
@@ -2598,7 +2596,7 @@ where
pub fn list_channels(&self) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
- // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+ // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
// unlikely as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
@@ -3716,9 +3714,10 @@ where

let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let funding_txo;
let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
- let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+ funding_txo = find_funding_output(&chan, &funding_transaction)?;

let logger = WithChannelContext::from(&self.logger, &chan.context);
let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
@@ -3766,9 +3765,9 @@ where
panic!("Generated duplicate funding txid?");
},
hash_map::Entry::Vacant(e) => {
- let mut id_to_peer = self.id_to_peer.lock().unwrap();
- if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
- panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+ let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+ if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() {
+ panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
}
e.insert(ChannelPhase::UnfundedOutboundV1(chan));
}
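A side note on the two hunks above: `funding_txo` is now declared before the `match` and assigned inside the `UnfundedOutboundV1` arm, so the outpoint is still in scope when it is inserted into `outpoint_to_peer` afterwards. A tiny illustrative sketch of that deferred-initialization pattern (generic Rust, not LDK code):

fn choose(flag: bool) -> u32 {
    // Deferred initialization: a `let` without an initializer may be assigned
    // exactly once on each path; the compiler rejects any read before that.
    let value;
    match flag {
        true => { value = 1; }
        false => { value = 2; }
    }
    // Readable here without `mut`, just like `funding_txo` in the hunks above.
    value
}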
@@ -5851,9 +5850,9 @@ where
Some(cp_id) => cp_id.clone(),
None => {
// TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the id_to_peer map should be removed.
- let id_to_peer = self.id_to_peer.lock().unwrap();
- match id_to_peer.get(&funding_txo.to_channel_id()) {
+ // monitor event, this and the outpoint_to_peer map should be removed.
+ let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+ match outpoint_to_peer.get(&funding_txo) {
Some(cp_id) => cp_id.clone(),
None => return,
}
@@ -6237,8 +6236,8 @@ where
))
},
hash_map::Entry::Vacant(e) => {
- let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
- match id_to_peer_lock.entry(chan.context.channel_id()) {
+ let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
+ match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
hash_map::Entry::Occupied(_) => {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
@@ -6248,7 +6247,7 @@ where
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
if let Ok(persist_state) = monitor_res {
i_e.insert(chan.context.get_counterparty_node_id());
- mem::drop(id_to_peer_lock);
+ mem::drop(outpoint_to_peer_lock);

// There's no problem signing a counterparty's funding transaction if our monitor
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
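The `funding_created` handling in the two hunks above uses the `HashMap` entry API so that the duplicate-funding-outpoint check and the eventual insert happen under a single acquisition of the `outpoint_to_peer` lock, which is only dropped once `watch_channel` has accepted the monitor. A reduced sketch of that pattern, with placeholder key/value types rather than the real `OutPoint`/`PublicKey`:

use std::collections::hash_map::{Entry, HashMap};

// Illustration only: reject a duplicate funding outpoint, otherwise record the
// counterparty, through a single `entry()` lookup.
fn register_funding(
    map: &mut HashMap<u64, u32>, // placeholder for funding outpoint -> counterparty
    outpoint: u64,
    counterparty: u32,
) -> Result<(), &'static str> {
    match map.entry(outpoint) {
        Entry::Occupied(_) => Err("duplicate funding outpoint - funding is not possible"),
        Entry::Vacant(e) => { e.insert(counterparty); Ok(()) }
    }
}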
@@ -7142,9 +7141,9 @@ where
Some(cp_id) => Some(cp_id),
None => {
// TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the id_to_peer map should be removed.
- let id_to_peer = self.id_to_peer.lock().unwrap();
- id_to_peer.get(&funding_outpoint.to_channel_id()).cloned()
+ // monitor event, this and the outpoint_to_peer map should be removed.
+ let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+ outpoint_to_peer.get(&funding_outpoint).cloned()
}
};
if let Some(counterparty_node_id) = counterparty_node_id_opt {
@@ -10081,7 +10080,7 @@ where
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
@@ -10159,8 +10158,8 @@ where
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
- if channel.context.is_funding_broadcast() {
- id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
+ if let Some(funding_txo) = channel.context.get_funding_txo() {
+ outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
}
match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
hash_map::Entry::Occupied(mut entry) => {
@@ -10494,7 +10493,7 @@ where
// We only rebuild the pending payments map if we were most recently serialized by
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
- let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+ let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
if counterparty_opt.is_none() {
let logger = WithChannelMonitor::from(&args.logger, monitor);
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
@@ -10787,7 +10786,7 @@ where
// without the new monitor persisted - we'll end up right back here on
// restart.
let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
- if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id) {
+ if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
@@ -10865,7 +10864,7 @@ where
forward_htlcs: Mutex::new(forward_htlcs),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
- id_to_peer: Mutex::new(id_to_peer),
+ outpoint_to_peer: Mutex::new(outpoint_to_peer),
short_to_chan_info: FairRwLock::new(short_to_chan_info),
fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
@@ -11482,8 +11481,8 @@ mod tests {
}

#[test]
- fn test_id_to_peer_coverage() {
- // Test that the `ChannelManager:id_to_peer` contains channels which have been assigned
+ fn test_outpoint_to_peer_coverage() {
+ // Test that the `ChannelManager:outpoint_to_peer` contains channels which have been assigned
// a `channel_id` (i.e. have had the funding tx created), and that they are removed once
// the channel is successfully closed.
let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -11497,42 +11496,42 @@ mod tests {
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

- let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
let channel_id = ChannelId::from_bytes(tx.txid().to_byte_array());
{
- // Ensure that the `id_to_peer` map is empty until either party has received the
+ // Ensure that the `outpoint_to_peer` map is empty until either party has received the
// funding transaction, and have the real `channel_id`.
- assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
}

nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
{
- // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as
+ // Assert that `nodes[0]`'s `outpoint_to_peer` map is populated with the channel as soon as
// as it has the funding transaction.
- let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
- assert!(nodes_0_lock.contains_key(&channel_id));
+ assert!(nodes_0_lock.contains_key(&funding_output));
}

- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);

let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
{
- let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
- assert!(nodes_0_lock.contains_key(&channel_id));
+ assert!(nodes_0_lock.contains_key(&funding_output));
}
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

{
- // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
- // as it has the funding transaction.
- let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ // Assert that `nodes[1]`'s `outpoint_to_peer` map is populated with the channel as
+ // soon as it has the funding transaction.
+ let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
- assert!(nodes_1_lock.contains_key(&channel_id));
+ assert!(nodes_1_lock.contains_key(&funding_output));
}
check_added_monitors!(nodes[1], 1);
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
@@ -11551,48 +11550,48 @@ mod tests {
let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
{
- // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+ // Assert that the channel is kept in the `outpoint_to_peer` map for both nodes until the
// channel can be fully closed by both parties (i.e. no outstanding htlcs exists, the
// fee for the closing transaction has been negotiated and the parties has the other
// party's signature for the fee negotiated closing transaction.)
- let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
- assert!(nodes_0_lock.contains_key(&channel_id));
+ assert!(nodes_0_lock.contains_key(&funding_output));
}

{
// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
- // kept in the `nodes[1]`'s `id_to_peer` map.
- let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ // kept in the `nodes[1]`'s `outpoint_to_peer` map.
+ let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
- assert!(nodes_1_lock.contains_key(&channel_id));
+ assert!(nodes_1_lock.contains_key(&funding_output));
}

nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
{
// `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
// therefore has all it needs to fully close the channel (both signatures for the
// closing transaction).
- // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+ // Assert that the channel is removed from `nodes[0]`'s `outpoint_to_peer` map as it can be
// fully closed by `nodes[0]`.
- assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);

- // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]`
+ // Assert that the channel is still in `nodes[1]`'s `outpoint_to_peer` map, as `nodes[1]`
// doesn't have `nodes[0]`'s signature for the closing transaction yet.
- let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
- assert!(nodes_1_lock.contains_key(&channel_id));
+ assert!(nodes_1_lock.contains_key(&funding_output));
}

let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());

nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
{
- // Assert that the channel has now been removed from both parties `id_to_peer` map once
+ // Assert that the channel has now been removed from both parties `outpoint_to_peer` map once
// they both have everything required to fully close the channel.
- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
}
let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());