
Commit 96fc0f3

Drop PeerHolder as it now only has one field
1 parent eb17464 commit 96fc0f3
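
The change is mechanical: PeerHolder wrapped nothing but the peer map, so PeerManager now stores the HashMap directly and every `peers.peers` access collapses to `peers`. In outline (a condensed sketch of the before/after field, not the full declarations from the diff):

// Before: a single-field wrapper struct.
struct PeerHolder<Descriptor: SocketDescriptor> {
	peers: HashMap<Descriptor, Mutex<Peer>>,
}
// ...and inside PeerManager:
//     peers: FairRwLock<PeerHolder<Descriptor>>,

// After: the map is stored directly.
// ...inside PeerManager:
//     peers: FairRwLock<HashMap<Descriptor, Mutex<Peer>>>,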


lightning/src/ln/peer_handler.rs

Lines changed: 36 additions & 38 deletions
@@ -376,14 +376,6 @@ impl Peer {
 	}
 }
 
-struct PeerHolder<Descriptor: SocketDescriptor> {
-	/// Peer is under its own mutex for sending and receiving bytes, but note that we do *not* hold
-	/// this mutex while we're processing a message. This is fine as [`PeerManager::read_event`]
-	/// requires that there be no parallel calls for a given peer, so mutual exclusion of messages
-	/// handed to the `MessageHandler`s for a given peer is already guaranteed.
-	peers: HashMap<Descriptor, Mutex<Peer>>,
-}
-
 /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
@@ -428,7 +420,15 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: De
 		L::Target: Logger,
 		CMH::Target: CustomMessageHandler {
 	message_handler: MessageHandler<CM, RM>,
-	peers: FairRwLock<PeerHolder<Descriptor>>,
+	/// Connection state for each connected peer - we have an outer read-write lock which is taken
+	/// as read while we're doing processing for a peer and taken write when a peer is being added
+	/// or removed.
+	///
+	/// The inner Peer lock is held for sending and receiving bytes, but note that we do *not* hold
+	/// it while we're processing a message. This is fine as [`PeerManager::read_event`] requires
+	/// that there be no parallel calls for a given peer, so mutual exclusion of messages handed to
+	/// the `MessageHandler`s for a given peer is already guaranteed.
+	peers: FairRwLock<HashMap<Descriptor, Mutex<Peer>>>,
 	/// Only add to this set when noise completes.
 	/// Locked *after* peers. When an item is removed, it must be removed with the `peers` write
 	/// lock held. Entries may be added with only the `peers` read lock held (though the
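
The new doc comment captures the locking discipline: the outer FairRwLock is taken as read for per-peer processing and as write only when a peer is added or removed, while the inner Mutex<Peer> guards that one peer's byte buffers. A rough sketch of the pattern (illustrative only, paraphrasing the call sites changed below rather than quoting them):

// Per-peer processing: outer lock as read, then lock just that peer's Mutex.
let peers = self.peers.read().unwrap();
if let Some(peer_mutex) = peers.get(descriptor) {
	let mut peer = peer_mutex.lock().unwrap();
	// ...send/receive bytes for this peer; the inner lock is dropped before
	// messages are handed to the MessageHandlers...
}

// Adding or removing a peer: outer lock as write.
let mut peers = self.peers.write().unwrap();
peers.remove(descriptor);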
@@ -570,9 +570,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
 		PeerManager {
 			message_handler,
-			peers: FairRwLock::new(PeerHolder {
-				peers: HashMap::new(),
-			}),
+			peers: FairRwLock::new(HashMap::new()),
 			node_id_to_descriptor: Mutex::new(HashMap::new()),
 			event_processing_lock: Mutex::new(()),
 			blocked_event_processors: AtomicBool::new(false),
@@ -591,7 +589,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 	/// completed and we are sure the remote peer has the private key for the given node_id.
 	pub fn get_peer_node_ids(&self) -> Vec<PublicKey> {
 		let peers = self.peers.read().unwrap();
-		peers.peers.values().filter_map(|peer_mutex| {
+		peers.values().filter_map(|peer_mutex| {
 			let p = peer_mutex.lock().unwrap();
 			if !p.channel_encryptor.is_ready_for_encryption() || p.their_features.is_none() {
 				return None;
@@ -629,7 +627,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
 
 		let mut peers = self.peers.write().unwrap();
-		if peers.peers.insert(descriptor, Mutex::new(Peer {
+		if peers.insert(descriptor, Mutex::new(Peer {
 			channel_encryptor: peer_encryptor,
 			their_node_id: None,
 			their_features: None,
@@ -676,7 +674,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
 
 		let mut peers = self.peers.write().unwrap();
-		if peers.peers.insert(descriptor, Mutex::new(Peer {
+		if peers.insert(descriptor, Mutex::new(Peer {
 			channel_encryptor: peer_encryptor,
 			their_node_id: None,
 			their_features: None,
@@ -787,7 +785,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 	/// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
 	pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
 		let peers = self.peers.read().unwrap();
-		match peers.peers.get(descriptor) {
+		match peers.get(descriptor) {
 			None => {
 				// This is most likely a simple race condition where the user found that the socket
 				// was writeable, then we told the user to `disconnect_socket()`, then they called
@@ -852,7 +850,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		let peers = self.peers.read().unwrap();
 		let mut msgs_to_forward = Vec::new();
 		let mut peer_node_id = None;
-		match peers.peers.get(peer_descriptor) {
+		match peers.get(peer_descriptor) {
 			None => {
 				// This is most likely a simple race condition where the user read some bytes
 				// from the socket, then we told the user to `disconnect_socket()`, then they
@@ -1288,13 +1286,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		Ok(should_forward)
 	}
 
-	fn forward_broadcast_msg(&self, peers: &PeerHolder<Descriptor>, msg: &wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
+	fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
 		match msg {
 			wire::Message::ChannelAnnouncement(ref msg) => {
 				log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
 				let encoded_msg = encode_msg!(msg);
 
-				for (_, peer_mutex) in peers.peers.iter() {
+				for (_, peer_mutex) in peers.iter() {
 					let mut peer = peer_mutex.lock().unwrap();
 					if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
 							!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
@@ -1320,7 +1318,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 				log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg);
 				let encoded_msg = encode_msg!(msg);
 
-				for (_, peer_mutex) in peers.peers.iter() {
+				for (_, peer_mutex) in peers.iter() {
 					let mut peer = peer_mutex.lock().unwrap();
 					if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
 							!peer.should_forward_node_announcement(msg.contents.node_id) {
@@ -1345,7 +1343,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 				log_gossip!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg);
 				let encoded_msg = encode_msg!(msg);
 
-				for (_, peer_mutex) in peers.peers.iter() {
+				for (_, peer_mutex) in peers.iter() {
 					let mut peer = peer_mutex.lock().unwrap();
 					if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
 							!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
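
With forward_broadcast_msg now taking the HashMap directly, the broadcast path presumably just hands in the map from under the outer read guard; the call shape would look roughly like this (a hypothetical sketch, not a call site shown in this diff):

// Hypothetical caller shape: deref the read guard to get a
// &HashMap<Descriptor, Mutex<Peer>> for forward_broadcast_msg.
let peers = self.peers.read().unwrap();
self.forward_broadcast_msg(&*peers, &wire::Message::ChannelAnnouncement(msg), None);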
@@ -1426,7 +1424,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 				}
 				let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned();
 				match descriptor_opt {
-					Some(descriptor) => match peers.peers.get(&descriptor) {
+					Some(descriptor) => match peers.get(&descriptor) {
 						Some(peer_mutex) => {
 							let peer_lock = peer_mutex.lock().unwrap();
 							if peer_lock.their_features.is_none() {
@@ -1625,7 +1623,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 				self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
 			}
 
-			for (descriptor, peer_mutex) in peers.peers.iter() {
+			for (descriptor, peer_mutex) in peers.iter() {
 				self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
 			}
 		}
@@ -1639,7 +1637,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		// lock).
 
 		if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
-			if let Some(peer_mutex) = peers.peers.remove(&descriptor) {
+			if let Some(peer_mutex) = peers.remove(&descriptor) {
 				if let Some(msg) = msg {
 					log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
 							log_pubkey!(node_id),
@@ -1667,7 +1665,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
 	fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) {
 		let mut peers = self.peers.write().unwrap();
-		let peer_option = peers.peers.remove(descriptor);
+		let peer_option = peers.remove(descriptor);
 		match peer_option {
 			None => {
 				// This is most likely a simple race condition where the user found that the socket
@@ -1703,7 +1701,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		let mut peers_lock = self.peers.write().unwrap();
 		if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
 			log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
-			peers_lock.peers.remove(&descriptor);
+			peers_lock.remove(&descriptor);
 			self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
 			descriptor.disconnect_socket();
 		}
@@ -1716,7 +1714,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		let mut peers_lock = self.peers.write().unwrap();
 		self.node_id_to_descriptor.lock().unwrap().clear();
 		let peers = &mut *peers_lock;
-		for (mut descriptor, peer) in peers.peers.drain() {
+		for (mut descriptor, peer) in peers.drain() {
 			if let Some(node_id) = peer.lock().unwrap().their_node_id {
 				log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
 				self.message_handler.chan_handler.peer_disconnected(&node_id, false);
@@ -1755,7 +1753,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		{
 			let peers_lock = self.peers.read().unwrap();
 
-			for (descriptor, peer_mutex) in peers_lock.peers.iter() {
+			for (descriptor, peer_mutex) in peers_lock.iter() {
 				let mut peer = peer_mutex.lock().unwrap();
 				if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() {
 					// The peer needs to complete its handshake before we can exchange messages. We
@@ -1779,7 +1777,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
 				if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
 					|| peer.awaiting_pong_timer_tick_intervals as u64 >
-						MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.peers.len() as u64
+						MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64
 				{
 					descriptors_needing_disconnect.push(descriptor.clone());
 					continue;
@@ -1805,7 +1803,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 		{
 			let mut peers_lock = self.peers.write().unwrap();
 			for descriptor in descriptors_needing_disconnect.iter() {
-				if let Some(peer) = peers_lock.peers.remove(&descriptor) {
+				if let Some(peer) = peers_lock.remove(descriptor) {
 					if let Some(node_id) = peer.lock().unwrap().their_node_id {
 						log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
 						self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
@@ -1935,7 +1933,7 @@ mod tests {
 		let chan_handler = test_utils::TestChannelMessageHandler::new();
 		let mut peers = create_network(2, &cfgs);
 		establish_connection(&peers[0], &peers[1]);
-		assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1);
+		assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
 		let secp_ctx = Secp256k1::new();
 		let their_id = PublicKey::from_secret_key(&secp_ctx, &peers[1].our_node_secret);
@@ -1948,7 +1946,7 @@ mod tests {
 		peers[0].message_handler.chan_handler = &chan_handler;
 
 		peers[0].process_events();
-		assert_eq!(peers[0].peers.read().unwrap().peers.len(), 0);
+		assert_eq!(peers[0].peers.read().unwrap().len(), 0);
 	}
 
 	#[test]
@@ -1957,17 +1955,17 @@ mod tests {
 		let cfgs = create_peermgr_cfgs(2);
 		let peers = create_network(2, &cfgs);
 		establish_connection(&peers[0], &peers[1]);
-		assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1);
+		assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
 		// peers[0] awaiting_pong is set to true, but the Peer is still connected
 		peers[0].timer_tick_occurred();
 		peers[0].process_events();
-		assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1);
+		assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
 		// Since timer_tick_occurred() is called again when awaiting_pong is true, all Peers are disconnected
 		peers[0].timer_tick_occurred();
 		peers[0].process_events();
-		assert_eq!(peers[0].peers.read().unwrap().peers.len(), 0);
+		assert_eq!(peers[0].peers.read().unwrap().len(), 0);
 	}
 
 	#[test]
#[test]
@@ -2029,9 +2027,9 @@ mod tests {
20292027
peers[0].new_inbound_connection(fd_a.clone(), None).unwrap();
20302028

20312029
// If we get a single timer tick before completion, that's fine
2032-
assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1);
2030+
assert_eq!(peers[0].peers.read().unwrap().len(), 1);
20332031
peers[0].timer_tick_occurred();
2034-
assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1);
2032+
assert_eq!(peers[0].peers.read().unwrap().len(), 1);
20352033

20362034
assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false);
20372035
peers[0].process_events();
@@ -2040,7 +2038,7 @@ mod tests {
 
 		// ...but if we get a second timer tick, we should disconnect the peer
 		peers[0].timer_tick_occurred();
-		assert_eq!(peers[0].peers.read().unwrap().peers.len(), 0);
+		assert_eq!(peers[0].peers.read().unwrap().len(), 0);
 
 		assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err());
 	}
