From 439f535e60a4e6620896603a4e492cbf38e60b06 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 12 Jun 2025 19:52:37 +0200 Subject: [PATCH 1/2] Rustfmt chainmonitor.rs --- lightning/src/chain/chainmonitor.rs | 554 ++++++++++++++++++++-------- 1 file changed, 407 insertions(+), 147 deletions(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index a740fa3dbcb..60f1c9863a7 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -126,8 +126,9 @@ pub trait Persist { /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`Writeable::write`]: crate::util::ser::Writeable::write - #[rustfmt::skip] - fn persist_new_channel(&self, monitor_name: MonitorName, monitor: &ChannelMonitor) -> ChannelMonitorUpdateStatus; + fn persist_new_channel( + &self, monitor_name: MonitorName, monitor: &ChannelMonitor, + ) -> ChannelMonitorUpdateStatus; /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given /// update. @@ -166,8 +167,10 @@ pub trait Persist { /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors. /// /// [`Writeable::write`]: crate::util::ser::Writeable::write - #[rustfmt::skip] - fn update_persisted_channel(&self, monitor_name: MonitorName, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor) -> ChannelMonitorUpdateStatus; + fn update_persisted_channel( + &self, monitor_name: MonitorName, monitor_update: Option<&ChannelMonitorUpdate>, + monitor: &ChannelMonitor, + ) -> ChannelMonitorUpdateStatus; /// Prevents the channel monitor from being loaded on startup. /// /// Archiving the data in a backup location (rather than deleting it fully) is useful for @@ -306,10 +309,10 @@ where /// updated `txdata`. /// /// Calls which represent a new blockchain tip height should set `best_height`. - #[rustfmt::skip] - fn process_chain_data(&self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN) - where - FN: Fn(&ChannelMonitor, &TransactionData) -> Vec + fn process_chain_data( + &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, + ) where + FN: Fn(&ChannelMonitor, &TransactionData) -> Vec, { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; let channel_ids = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned()); @@ -317,7 +320,18 @@ where for channel_id in channel_ids.iter() { let monitor_lock = self.monitors.read().unwrap(); if let Some(monitor_state) = monitor_lock.get(channel_id) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, channel_id, &monitor_state, channel_count).is_err() { + if self + .update_monitor_with_chain_data( + header, + best_height, + txdata, + &process, + channel_id, + &monitor_state, + channel_count, + ) + .is_err() + { // Take the monitors lock for writing so that we poison it and any future // operations going forward fail immediately. 
core::mem::drop(monitor_lock); @@ -332,7 +346,18 @@ where let monitor_states = self.monitors.write().unwrap(); for (channel_id, monitor_state) in monitor_states.iter() { if !channel_ids.contains(channel_id) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, channel_id, &monitor_state, channel_count).is_err() { + if self + .update_monitor_with_chain_data( + header, + best_height, + txdata, + &process, + channel_id, + &monitor_state, + channel_count, + ) + .is_err() + { log_error!(self.logger, "{}", err_str); panic!("{}", err_str); } @@ -350,11 +375,13 @@ where } } - #[rustfmt::skip] fn update_monitor_with_chain_data( - &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, channel_id: &ChannelId, - monitor_state: &MonitorHolder, channel_count: usize, - ) -> Result<(), ()> where FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { + &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, + channel_id: &ChannelId, monitor_state: &MonitorHolder, channel_count: usize, + ) -> Result<(), ()> + where + FN: Fn(&ChannelMonitor, &TransactionData) -> Vec, + { let monitor = &monitor_state.monitor; let logger = WithChannelMonitor::from(&self.logger, &monitor, None); @@ -362,7 +389,12 @@ where let get_partition_key = |channel_id: &ChannelId| { let channel_id_bytes = channel_id.0; - let channel_id_u32 = u32::from_be_bytes([channel_id_bytes[0], channel_id_bytes[1], channel_id_bytes[2], channel_id_bytes[3]]); + let channel_id_u32 = u32::from_be_bytes([ + channel_id_bytes[0], + channel_id_bytes[1], + channel_id_bytes[2], + channel_id_bytes[3], + ]); channel_id_u32.wrapping_add(best_height.unwrap_or_default()) }; @@ -374,23 +406,33 @@ where let has_pending_claims = monitor_state.monitor.has_pending_claims(); if has_pending_claims || get_partition_key(channel_id) % partition_factor == 0 { - log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor)); + log_trace!( + logger, + "Syncing Channel Monitor for channel {}", + log_funding_info!(monitor) + ); // Even though we don't track monitor updates from chain-sync as pending, we still want // updates per-channel to be well-ordered so that users don't see a // `ChannelMonitorUpdate` after a channel persist for a channel with the same // `latest_update_id`. 
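// A self-contained sketch of the partitioning rule applied above (my naming;
// the real logic lives inline in `update_monitor_with_chain_data`): the key is
// the first four big-endian bytes of the channel id offset by the block
// height, so each monitor lands on a different block within the cycle, while
// monitors with pending claims bypass partitioning and persist on every block.
fn chain_sync_should_persist(
	channel_id: &[u8; 32], best_height: Option<u32>, partition_factor: u32,
	has_pending_claims: bool,
) -> bool {
	let prefix =
		u32::from_be_bytes([channel_id[0], channel_id[1], channel_id[2], channel_id[3]]);
	let key = prefix.wrapping_add(best_height.unwrap_or_default());
	has_pending_claims || key % partition_factor == 0
}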
let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); - match self.persister.update_persisted_channel(monitor.persistence_key(), None, monitor) { - ChannelMonitorUpdateStatus::Completed => - log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data", - log_funding_info!(monitor) - ), + match self.persister.update_persisted_channel(monitor.persistence_key(), None, monitor) + { + ChannelMonitorUpdateStatus::Completed => log_trace!( + logger, + "Finished syncing Channel Monitor for channel {} for block-data", + log_funding_info!(monitor) + ), ChannelMonitorUpdateStatus::InProgress => { - log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor)); - } + log_trace!( + logger, + "Channel Monitor sync for channel {} in progress.", + log_funding_info!(monitor) + ); + }, ChannelMonitorUpdateStatus::UnrecoverableError => { return Err(()); - } + }, } } @@ -406,7 +448,11 @@ where outpoint: OutPoint { txid, index: idx as u16 }, script_pubkey: output.script_pubkey, }; - log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint); + log_trace!( + logger, + "Adding monitoring for spends of outpoint {} to the filter", + output.outpoint + ); chain_source.register_output(output); } } @@ -433,8 +479,10 @@ where /// [`NodeSigner`]: crate::sign::NodeSigner /// [`NodeSigner::get_peer_storage_key`]: crate::sign::NodeSigner::get_peer_storage_key /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager - #[rustfmt::skip] - pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P, entropy_source: ES, our_peerstorage_encryption_key: PeerStorageKey) -> Self { + pub fn new( + chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P, + entropy_source: ES, our_peerstorage_encryption_key: PeerStorageKey, + ) -> Self { Self { monitors: RwLock::new(new_hash_map()), chain_source, @@ -447,7 +495,7 @@ where highest_chain_height: AtomicUsize::new(0), event_notifier: Notifier::new(), pending_send_only_events: Mutex::new(Vec::new()), - our_peerstorage_encryption_key + our_peerstorage_encryption_key, } } @@ -515,11 +563,15 @@ where /// Each `Vec` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`]. - #[rustfmt::skip] pub fn list_pending_monitor_updates(&self) -> Vec<(ChannelId, Vec)> { - self.monitors.read().unwrap().iter().map(|(channel_id, holder)| { - (*channel_id, holder.pending_monitor_updates.lock().unwrap().clone()) - }).collect() + self.monitors + .read() + .unwrap() + .iter() + .map(|(channel_id, holder)| { + (*channel_id, holder.pending_monitor_updates.lock().unwrap().clone()) + }) + .collect() } #[cfg(any(test, feature = "_test_utils"))] @@ -547,11 +599,16 @@ where /// /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently /// registered [`ChannelMonitor`]s. 
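// A hedged sketch of how the constructor above is wired in caller code; the
// `broadcaster`, `logger`, `fee_estimator`, `persister`, `entropy_source` and
// `peer_storage_key` values are assumed to exist and satisfy the trait bounds.
let chain_monitor = ChainMonitor::new(
	None, // no chain::Filter: the caller feeds in full blocks
	&broadcaster,
	&logger,
	&fee_estimator,
	&persister,
	&entropy_source,
	peer_storage_key,
);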
- #[rustfmt::skip] - pub fn channel_monitor_updated(&self, channel_id: ChannelId, completed_update_id: u64) -> Result<(), APIError> { + pub fn channel_monitor_updated( + &self, channel_id: ChannelId, completed_update_id: u64, + ) -> Result<(), APIError> { let monitors = self.monitors.read().unwrap(); - let monitor_data = if let Some(mon) = monitors.get(&channel_id) { mon } else { - return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching channel ID {} found", channel_id) }); + let monitor_data = if let Some(mon) = monitors.get(&channel_id) { + mon + } else { + return Err(APIError::APIMisuseError { + err: format!("No ChannelMonitor matching channel ID {} found", channel_id), + }); }; let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap(); pending_monitor_updates.retain(|update_id| *update_id != completed_update_id); @@ -559,25 +616,33 @@ where // Note that we only check for pending non-chainsync monitor updates and we don't track monitor // updates resulting from chainsync in `pending_monitor_updates`. let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates); - log_debug!(self.logger, "Completed off-chain monitor update {} for channel with channel ID {}, {}", + log_debug!( + self.logger, + "Completed off-chain monitor update {} for channel with channel ID {}, {}", completed_update_id, channel_id, if monitor_is_pending_updates { "still have pending off-chain updates" } else { "all off-chain updates complete, returning a MonitorEvent" - }); + } + ); if monitor_is_pending_updates { // If there are still monitor updates pending, we cannot yet construct a // Completed event. return Ok(()); } let funding_txo = monitor_data.monitor.get_funding_txo(); - self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { + self.pending_monitor_events.lock().unwrap().push(( funding_txo, channel_id, - monitor_update_id: monitor_data.monitor.get_latest_update_id(), - }], monitor_data.monitor.get_counterparty_node_id())); + vec![MonitorEvent::Completed { + funding_txo, + channel_id, + monitor_update_id: monitor_data.monitor.get_latest_update_id(), + }], + monitor_data.monitor.get_counterparty_node_id(), + )); self.event_notifier.notify(); Ok(()) @@ -587,17 +652,17 @@ where /// chain::Watch API wherein we mark a monitor fully-updated by just calling /// channel_monitor_updated once with the highest ID. #[cfg(any(test, fuzzing))] - #[rustfmt::skip] pub fn force_channel_monitor_updated(&self, channel_id: ChannelId, monitor_update_id: u64) { let monitors = self.monitors.read().unwrap(); let monitor = &monitors.get(&channel_id).unwrap().monitor; let counterparty_node_id = monitor.get_counterparty_node_id(); let funding_txo = monitor.get_funding_txo(); - self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { + self.pending_monitor_events.lock().unwrap().push(( funding_txo, channel_id, - monitor_update_id, - }], counterparty_node_id)); + vec![MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id }], + counterparty_node_id, + )); self.event_notifier.notify(); } @@ -616,9 +681,11 @@ where /// See the trait-level documentation of [`EventsProvider`] for requirements. 
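// The async-persistence handshake implemented above, in miniature. This is a
// sketch: `background_writer` is a hypothetical stand-in for whatever task
// finishes the write after a `Persist` method returned `InProgress`.
//
//   background_writer.flush(channel_id, update_id)?;               // durable now
//   chain_monitor.channel_monitor_updated(channel_id, update_id)?; // mark done
//
// Only once no update ids remain pending for the channel does the
// ChainMonitor queue a `MonitorEvent::Completed` carrying the monitor's
// latest update id, letting the ChannelManager resume the channel.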
/// /// [`EventsProvider`]: crate::events::EventsProvider - #[rustfmt::skip] - pub async fn process_pending_events_async>, H: Fn(Event) -> Future>( - &self, handler: H + pub async fn process_pending_events_async< + Future: core::future::Future>, + H: Fn(Event) -> Future, + >( + &self, handler: H, ) { // Sadly we can't hold the monitors read lock through an async call. Thus we have to do a // crazy dance to process a monitor's events then only remove them once we've done so. @@ -626,11 +693,15 @@ where for channel_id in mons_to_process { let mut ev; match super::channelmonitor::process_events_body!( - self.monitors.read().unwrap().get(&channel_id).map(|m| &m.monitor), self.logger, ev, handler(ev).await) { + self.monitors.read().unwrap().get(&channel_id).map(|m| &m.monitor), + self.logger, + ev, + handler(ev).await + ) { Ok(()) => {}, - Err(ReplayEvent ()) => { + Err(ReplayEvent()) => { self.event_notifier.notify(); - } + }, } } } @@ -652,12 +723,13 @@ where /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. - #[rustfmt::skip] pub fn rebroadcast_pending_claims(&self) { let monitors = self.monitors.read().unwrap(); for (_, monitor_holder) in &*monitors { monitor_holder.monitor.rebroadcast_pending_claims( - &*self.broadcaster, &*self.fee_estimator, &self.logger + &*self.broadcaster, + &*self.fee_estimator, + &self.logger, ) } } @@ -666,19 +738,22 @@ where /// signature generation failure. /// /// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor. - #[rustfmt::skip] pub fn signer_unblocked(&self, monitor_opt: Option) { let monitors = self.monitors.read().unwrap(); if let Some(channel_id) = monitor_opt { if let Some(monitor_holder) = monitors.get(&channel_id) { monitor_holder.monitor.signer_unblocked( - &*self.broadcaster, &*self.fee_estimator, &self.logger + &*self.broadcaster, + &*self.fee_estimator, + &self.logger, ) } } else { for (_, monitor_holder) in &*monitors { monitor_holder.monitor.signer_unblocked( - &*self.broadcaster, &*self.fee_estimator, &self.logger + &*self.broadcaster, + &*self.fee_estimator, + &self.logger, ) } } @@ -693,30 +768,37 @@ where /// /// Depending on the implementation of [`Persist::archive_persisted_channel`] the monitor /// data could be moved to an archive location or removed entirely. 
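// A sketch of the periodic driver the docs above imply, assuming a tokio
// runtime and a user-supplied `async fn handle_event(Event) -> Result<(),
// ReplayEvent>`; the 30-second cadence follows the rebroadcast
// recommendation above.
loop {
	chain_monitor.rebroadcast_pending_claims();
	chain_monitor.process_pending_events_async(|event| handle_event(event)).await;
	tokio::time::sleep(std::time::Duration::from_secs(30)).await;
}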
- #[rustfmt::skip] pub fn archive_fully_resolved_channel_monitors(&self) { let mut have_monitors_to_prune = false; for monitor_holder in self.monitors.read().unwrap().values() { let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); - let (is_fully_resolved, needs_persistence) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger); + let (is_fully_resolved, needs_persistence) = + monitor_holder.monitor.check_and_update_full_resolution_status(&logger); if is_fully_resolved { have_monitors_to_prune = true; } if needs_persistence { - self.persister.update_persisted_channel(monitor_holder.monitor.persistence_key(), None, &monitor_holder.monitor); + self.persister.update_persisted_channel( + monitor_holder.monitor.persistence_key(), + None, + &monitor_holder.monitor, + ); } } if have_monitors_to_prune { let mut monitors = self.monitors.write().unwrap(); monitors.retain(|channel_id, monitor_holder| { let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); - let (is_fully_resolved, _) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger); + let (is_fully_resolved, _) = + monitor_holder.monitor.check_and_update_full_resolution_status(&logger); if is_fully_resolved { - log_info!(logger, + log_info!( + logger, "Archiving fully resolved ChannelMonitor for channel ID {}", channel_id ); - self.persister.archive_persisted_channel(monitor_holder.monitor.persistence_key()); + self.persister + .archive_persisted_channel(monitor_holder.monitor.persistence_key()); false } else { true @@ -727,16 +809,11 @@ where /// This function collects the counterparty node IDs from all monitors into a `HashSet`, /// ensuring unique IDs are returned. - #[rustfmt::skip] fn all_counterparty_node_ids(&self) -> HashSet { let mon = self.monitors.read().unwrap(); - mon - .values() - .map(|monitor| monitor.monitor.get_counterparty_node_id()) - .collect() + mon.values().map(|monitor| monitor.monitor.get_counterparty_node_id()).collect() } - #[rustfmt::skip] fn send_peer_storage(&self, their_node_id: PublicKey) { // TODO: Serialize `ChannelMonitor`s inside `our_peer_storage`. 
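// Note the two-phase shape of the archive pass above: resolution status is
// evaluated (and persisted if needed) under the read lock, and monitors are
// only dropped from the map under the write lock after
// `Persist::archive_persisted_channel` runs, so a crash between the two
// phases never loses an unarchived monitor; it is simply re-archived on the
// next pass.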
@@ -747,7 +824,8 @@ where log_debug!(self.logger, "Sending Peer Storage to {}", log_pubkey!(their_node_id)); let send_peer_storage_event = MessageSendEvent::SendPeerStorage { - node_id: their_node_id, msg: msgs::PeerStorage { data: cipher.into_vec() } + node_id: their_node_id, + msg: msgs::PeerStorage { data: cipher.into_vec() }, }; self.pending_send_only_events.lock().unwrap().push(send_peer_storage_event) @@ -786,8 +864,11 @@ where InitFeatures::empty() } - #[rustfmt::skip] - fn peer_connected(&self, _their_node_id: PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) } + fn peer_connected( + &self, _their_node_id: PublicKey, _msg: &Init, _inbound: bool, + ) -> Result<(), ()> { + Ok(()) + } } impl< @@ -807,12 +888,22 @@ where P::Target: Persist, ES::Target: EntropySource, { - #[rustfmt::skip] fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { - log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height); + log_debug!( + self.logger, + "New best block {} at height {} provided via block_connected", + header.block_hash(), + height + ); self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| { monitor.block_connected( - header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger) + header, + txdata, + height, + &*self.broadcaster, + &*self.fee_estimator, + &self.logger, + ) }); // Send peer storage everytime a new block arrives. @@ -824,13 +915,22 @@ where self.event_notifier.notify(); } - #[rustfmt::skip] fn block_disconnected(&self, header: &Header, height: u32) { let monitor_states = self.monitors.read().unwrap(); - log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height); + log_debug!( + self.logger, + "Latest block {} at height {} removed via block_disconnected", + header.block_hash(), + height + ); for monitor_state in monitor_states.values() { monitor_state.monitor.block_disconnected( - header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger); + header, + height, + &*self.broadcaster, + &*self.fee_estimator, + &self.logger, + ); } } } @@ -852,35 +952,58 @@ where P::Target: Persist, ES::Target: EntropySource, { - #[rustfmt::skip] fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { - log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash()); + log_debug!( + self.logger, + "{} provided transactions confirmed at height {} in block {}", + txdata.len(), + height, + header.block_hash() + ); self.process_chain_data(header, None, txdata, |monitor, txdata| { monitor.transactions_confirmed( - header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger) + header, + txdata, + height, + &*self.broadcaster, + &*self.fee_estimator, + &self.logger, + ) }); // Assume we may have some new events and wake the event processor self.event_notifier.notify(); } - #[rustfmt::skip] fn transaction_unconfirmed(&self, txid: &Txid) { log_debug!(self.logger, "Transaction {} reorganized out of chain", txid); let monitor_states = self.monitors.read().unwrap(); for monitor_state in monitor_states.values() { - monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger); + monitor_state.monitor.transaction_unconfirmed( + txid, + &*self.broadcaster, + &*self.fee_estimator, + &self.logger, + ); } } - #[rustfmt::skip] fn 
best_block_updated(&self, header: &Header, height: u32) { - log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height); + log_debug!( + self.logger, + "New best block {} at height {} provided via best_block_updated", + header.block_hash(), + height + ); self.process_chain_data(header, Some(height), &[], |monitor, txdata| { // While in practice there shouldn't be any recursive calls when given empty txdata, // it's still possible if a chain::Filter implementation returns a transaction. debug_assert!(txdata.is_empty()); monitor.best_block_updated( - header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger + header, + height, + &*self.broadcaster, + &*self.fee_estimator, + &self.logger, ) }); @@ -923,8 +1046,9 @@ where P::Target: Persist, ES::Target: EntropySource, { - #[rustfmt::skip] - fn watch_channel(&self, channel_id: ChannelId, monitor: ChannelMonitor) -> Result { + fn watch_channel( + &self, channel_id: ChannelId, monitor: ChannelMonitor, + ) -> Result { let logger = WithChannelMonitor::from(&self.logger, &monitor, None); let mut monitors = self.monitors.write().unwrap(); let entry = match monitors.entry(channel_id) { @@ -940,11 +1064,19 @@ where let persist_res = self.persister.persist_new_channel(monitor.persistence_key(), &monitor); match persist_res { ChannelMonitorUpdateStatus::InProgress => { - log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor)); + log_info!( + logger, + "Persistence of new ChannelMonitor for channel {} in progress", + log_funding_info!(monitor) + ); pending_monitor_updates.push(update_id); }, ChannelMonitorUpdateStatus::Completed => { - log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor)); + log_info!( + logger, + "Persistence of new ChannelMonitor for channel {} completed", + log_funding_info!(monitor) + ); }, ChannelMonitorUpdateStatus::UnrecoverableError => { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; @@ -953,7 +1085,7 @@ where }, } if let Some(ref chain_source) = self.chain_source { - monitor.load_outputs_to_watch(chain_source , &self.logger); + monitor.load_outputs_to_watch(chain_source, &self.logger); } entry.insert(MonitorHolder { monitor, @@ -962,8 +1094,9 @@ where Ok(persist_res) } - #[rustfmt::skip] - fn update_channel(&self, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { + fn update_channel( + &self, channel_id: ChannelId, update: &ChannelMonitorUpdate, + ) -> ChannelMonitorUpdateStatus { // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those // versions are V1-established. For 0.0.121+ the `channel_id` fields is always `Some`. debug_assert_eq!(update.channel_id.unwrap(), channel_id); @@ -985,13 +1118,24 @@ where Some(monitor_state) => { let monitor = &monitor_state.monitor; let logger = WithChannelMonitor::from(&self.logger, &monitor, None); - log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor)); + log_trace!( + logger, + "Updating ChannelMonitor to id {} for channel {}", + update.update_id, + log_funding_info!(monitor) + ); // We hold a `pending_monitor_updates` lock through `update_monitor` to ensure we // have well-ordered updates from the users' point of view. See the // `pending_monitor_updates` docs for more. 
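// The Confirm-style (Electrum-like) sync sequence shown above, as a hedged
// usage sketch; `header`, `height`, `relevant` (txdata filtered via
// chain::Filter) and `txid` are assumed to come from the caller's chain
// source.
chain_monitor.transactions_confirmed(&header, &relevant, height);
chain_monitor.best_block_updated(&header, height);
// ...and on reorg, transactions are unconfirmed individually:
chain_monitor.transaction_unconfirmed(&txid);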
- let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); - let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger); + let mut pending_monitor_updates = + monitor_state.pending_monitor_updates.lock().unwrap(); + let update_res = monitor.update_monitor( + update, + &self.broadcaster, + &self.fee_estimator, + &self.logger, + ); let update_id = update.update_id; let persist_res = if update_res.is_err() { @@ -1001,9 +1145,17 @@ where // while reading `channel_monitor` with updates from storage. Instead, we should persist // the entire `channel_monitor` here. log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor)); - self.persister.update_persisted_channel(monitor.persistence_key(), None, monitor) + self.persister.update_persisted_channel( + monitor.persistence_key(), + None, + monitor, + ) } else { - self.persister.update_persisted_channel(monitor.persistence_key(), Some(update), monitor) + self.persister.update_persisted_channel( + monitor.persistence_key(), + Some(update), + monitor, + ) }; match persist_res { ChannelMonitorUpdateStatus::InProgress => { @@ -1015,7 +1167,8 @@ where ); }, ChannelMonitorUpdateStatus::Completed => { - log_debug!(logger, + log_debug!( + logger, "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed", update_id, log_funding_info!(monitor) @@ -1037,12 +1190,13 @@ where } else { persist_res } - } + }, } } - #[rustfmt::skip] - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec, PublicKey)> { + fn release_pending_monitor_events( + &self, + ) -> Vec<(OutPoint, ChannelId, Vec, PublicKey)> { let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0); for monitor_state in self.monitors.read().unwrap().values() { let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events(); @@ -1050,7 +1204,12 @@ where let monitor_funding_txo = monitor_state.monitor.get_funding_txo(); let monitor_channel_id = monitor_state.monitor.channel_id(); let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id(); - pending_monitor_events.push((monitor_funding_txo, monitor_channel_id, monitor_events, counterparty_node_id)); + pending_monitor_events.push(( + monitor_funding_txo, + monitor_channel_id, + monitor_events, + counterparty_node_id, + )); } } pending_monitor_events @@ -1087,14 +1246,16 @@ where /// /// [`SpendableOutputs`]: events::Event::SpendableOutputs /// [`BumpTransaction`]: events::Event::BumpTransaction - #[rustfmt::skip] - fn process_pending_events(&self, handler: H) where H::Target: EventHandler { + fn process_pending_events(&self, handler: H) + where + H::Target: EventHandler, + { for monitor_state in self.monitors.read().unwrap().values() { match monitor_state.monitor.process_pending_events(&handler, &self.logger) { Ok(()) => {}, - Err(ReplayEvent ()) => { + Err(ReplayEvent()) => { self.event_notifier.notify(); - } + }, } } } @@ -1114,7 +1275,6 @@ mod tests { const CHAINSYNC_MONITOR_PARTITION_FACTOR: u32 = 5; #[test] - #[rustfmt::skip] fn test_async_ooo_offchain_updates() { // Test that if we have multiple offchain updates being persisted and they complete // out-of-order, the ChainMonitor waits until all have completed before informing the @@ -1126,8 +1286,10 @@ mod tests { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Route two payments to be claimed at the same time. 
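// The fallback above, reduced to its core (a sketch; `key` abbreviates
// `monitor.persistence_key()`): if applying the update in memory fails, the
// full, still-consistent monitor is persisted instead of the bad update, so
// storage never holds an update that cannot be replayed on restart.
let persist_res = if update_res.is_err() {
	self.persister.update_persisted_channel(key, None, monitor)
} else {
	self.persister.update_persisted_channel(key, Some(update), monitor)
};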
- let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_2, payment_hash_2, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear(); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); @@ -1138,7 +1300,8 @@ mod tests { nodes[1].node.claim_funds(payment_preimage_2); check_added_monitors!(nodes[1], 1); - let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); + let persistences = + chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); assert_eq!(persistences.len(), 1); let (_, updates) = persistences.iter().next().unwrap(); assert_eq!(updates.len(), 2); @@ -1149,23 +1312,55 @@ mod tests { let next_update = update_iter.next().unwrap().clone(); // Should contain next_update when pending updates listed. #[cfg(not(c_bindings))] - assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(&channel_id) - .unwrap().contains(&next_update)); + assert!(nodes[1] + .chain_monitor + .chain_monitor + .list_pending_monitor_updates() + .get(&channel_id) + .unwrap() + .contains(&next_update)); #[cfg(c_bindings)] - assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter() - .find(|(chan_id, _)| *chan_id == channel_id).unwrap().1.contains(&next_update)); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(channel_id, next_update.clone()).unwrap(); + assert!(nodes[1] + .chain_monitor + .chain_monitor + .list_pending_monitor_updates() + .iter() + .find(|(chan_id, _)| *chan_id == channel_id) + .unwrap() + .1 + .contains(&next_update)); + nodes[1] + .chain_monitor + .chain_monitor + .channel_monitor_updated(channel_id, next_update.clone()) + .unwrap(); // Should not contain the previously pending next_update when pending updates listed. #[cfg(not(c_bindings))] - assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(&channel_id) - .unwrap().contains(&next_update)); + assert!(!nodes[1] + .chain_monitor + .chain_monitor + .list_pending_monitor_updates() + .get(&channel_id) + .unwrap() + .contains(&next_update)); #[cfg(c_bindings)] - assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter() - .find(|(chan_id, _)| *chan_id == channel_id).unwrap().1.contains(&next_update)); + assert!(!nodes[1] + .chain_monitor + .chain_monitor + .list_pending_monitor_updates() + .iter() + .find(|(chan_id, _)| *chan_id == channel_id) + .unwrap() + .1 + .contains(&next_update)); assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(channel_id, update_iter.next().unwrap().clone()).unwrap(); + nodes[1] + .chain_monitor + .chain_monitor + .channel_monitor_updated(channel_id, update_iter.next().unwrap().clone()) + .unwrap(); let claim_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(claim_events.len(), 2); @@ -1186,33 +1381,60 @@ mod tests { // back-to-back it doesn't fit into the neat walk commitment_signed_dance does. 
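// What the test asserts, in miniature (a sketch against the public API):
// completing only the first of two pending updates must not release any
// monitor events; completing the second releases the Completed event.
chain_monitor.channel_monitor_updated(channel_id, first_update_id).unwrap();
assert!(chain_monitor.release_pending_monitor_events().is_empty());
chain_monitor.channel_monitor_updated(channel_id, second_update_id).unwrap();
assert!(!chain_monitor.release_pending_monitor_events().is_empty());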
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &updates.commitment_signed, + ); check_added_monitors!(nodes[0], 1); - let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (as_first_raa, as_first_update) = + get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); check_added_monitors!(nodes[1], 1); let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_update); + nodes[1] + .node + .handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_update); check_added_monitors!(nodes[1], 1); - let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]); + let bs_first_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); + + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &bs_second_updates.update_fulfill_htlcs[0], + ); expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &bs_second_updates.commitment_signed, + ); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); expect_payment_path_successful!(nodes[0]); check_added_monitors!(nodes[0], 1); - let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (as_second_raa, as_second_update) = + get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_update); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &as_second_update, + ); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_second_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); expect_payment_path_successful!(nodes[0]); @@ -1220,7 +1442,6 @@ mod tests { } #[test] - #[rustfmt::skip] fn test_chainsync_triggers_distributed_monitor_persistence() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); 
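// The hand-driven settlement above, compressed to its message order (A =
// nodes[0], the payer; B = nodes[1], the claimer; one leg per payment):
//
//   B -> A: update_fulfill_htlc + commitment_signed
//   A -> B: revoke_and_ack, then commitment_signed
//   B -> A: revoke_and_ack
//
// With two claims in flight back-to-back the legs interleave, which is why
// the test steps through each message by hand instead of using the
// commitment_signed_dance helper.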
@@ -1234,7 +1455,8 @@ mod tests { *nodes[2].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen; let _channel_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let channel_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0).2; + let channel_2 = + create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0).2; chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear(); chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().clear(); @@ -1246,15 +1468,37 @@ mod tests { // Connecting [`DEFAULT_CHAINSYNC_PARTITION_FACTOR`] * 2 blocks should trigger only 2 writes // per monitor/channel. - assert_eq!(2 * 2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len()); - assert_eq!(2, chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().len()); - assert_eq!(2, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len()); + assert_eq!( + 2 * 2, + chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len() + ); + assert_eq!( + 2, + chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().len() + ); + assert_eq!( + 2, + chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len() + ); // Test that monitors with pending_claims are persisted on every block. // Now, close channel_2 i.e. b/w node-0 and node-2 to create pending_claim in node[0]. - nodes[0].node.force_close_broadcasting_latest_txn(&channel_2, &nodes[2].node.get_our_node_id(), "Channel force-closed".to_string()).unwrap(); - check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, - [nodes[2].node.get_our_node_id()], 1000000); + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &channel_2, + &nodes[2].node.get_our_node_id(), + "Channel force-closed".to_string(), + ) + .unwrap(); + check_closed_event!( + &nodes[0], + 1, + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, + false, + [nodes[2].node.get_our_node_id()], + 1000000 + ); check_closed_broadcast(&nodes[0], 1, true); let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(close_tx.len(), 1); @@ -1262,8 +1506,14 @@ mod tests { mine_transaction(&nodes[2], &close_tx[0]); check_added_monitors(&nodes[2], 1); check_closed_broadcast(&nodes[2], 1, true); - check_closed_event!(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false, - [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event!( + &nodes[2], + 1, + ClosureReason::CommitmentTxConfirmed, + false, + [nodes[0].node.get_our_node_id()], + 1000000 + ); chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear(); chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear(); @@ -1277,9 +1527,15 @@ mod tests { // DEFAULT_CHAINSYNC_MONITOR_PARTITION_FACTOR writes for channel_2 due to pending_claim, 1 for // channel_1 - assert_eq!((CHAINSYNC_MONITOR_PARTITION_FACTOR + 1) as usize, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len()); + assert_eq!( + (CHAINSYNC_MONITOR_PARTITION_FACTOR + 1) as usize, + chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len() + ); // For node[2], there is no pending_claim - assert_eq!(1, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len()); + assert_eq!( + 1, + chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len() 
+ ); // Confirm claim for node[0] with ANTI_REORG_DELAY and reset monitor write counter. mine_transaction(&nodes[0], &close_tx[0]); @@ -1290,12 +1546,14 @@ mod tests { // Again connect 1 full cycle of DEFAULT_CHAINSYNC_MONITOR_PARTITION_FACTOR blocks, it should only // result in 1 write per monitor/channel. connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR); - assert_eq!(2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len()); + assert_eq!( + 2, + chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len() + ); } #[test] #[cfg(feature = "std")] - #[rustfmt::skip] fn update_during_chainsync_poisons_channel() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -1311,10 +1569,12 @@ mod tests { // Connecting [`DEFAULT_CHAINSYNC_PARTITION_FACTOR`] blocks so that we trigger some persistence // after accounting for block-height based partitioning/distribution. connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR); - }).is_err()); + }) + .is_err()); assert!(std::panic::catch_unwind(|| { // ...and also poison our locks causing later use to panic as well core::mem::drop(nodes); - }).is_err()); + }) + .is_err()); } } From 3242b9369780be53b6bb3dc09a594e8d1cb65c4a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 12 Jun 2025 21:11:59 +0200 Subject: [PATCH 2/2] Formatting improvements chainmonitor.rs --- lightning/src/chain/chainmonitor.rs | 72 ++++++++++++----------------- 1 file changed, 29 insertions(+), 43 deletions(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 60f1c9863a7..09db795e13a 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -320,18 +320,16 @@ where for channel_id in channel_ids.iter() { let monitor_lock = self.monitors.read().unwrap(); if let Some(monitor_state) = monitor_lock.get(channel_id) { - if self - .update_monitor_with_chain_data( - header, - best_height, - txdata, - &process, - channel_id, - &monitor_state, - channel_count, - ) - .is_err() - { + let update_res = self.update_monitor_with_chain_data( + header, + best_height, + txdata, + &process, + channel_id, + &monitor_state, + channel_count, + ); + if update_res.is_err() { // Take the monitors lock for writing so that we poison it and any future // operations going forward fail immediately. core::mem::drop(monitor_lock); @@ -346,18 +344,16 @@ where let monitor_states = self.monitors.write().unwrap(); for (channel_id, monitor_state) in monitor_states.iter() { if !channel_ids.contains(channel_id) { - if self - .update_monitor_with_chain_data( - header, - best_height, - txdata, - &process, - channel_id, - &monitor_state, - channel_count, - ) - .is_err() - { + let update_res = self.update_monitor_with_chain_data( + header, + best_height, + txdata, + &process, + channel_id, + &monitor_state, + channel_count, + ); + if update_res.is_err() { log_error!(self.logger, "{}", err_str); panic!("{}", err_str); } @@ -564,9 +560,8 @@ where /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`]. 
 	pub fn list_pending_monitor_updates(&self) -> Vec<(ChannelId, Vec<u64>)> {
-		self.monitors
-			.read()
-			.unwrap()
+		let monitors = self.monitors.read().unwrap();
+		monitors
 			.iter()
 			.map(|(channel_id, holder)| {
 				(*channel_id, holder.pending_monitor_updates.lock().unwrap().clone())
 			})
 			.collect()
 	}
@@ -1491,14 +1486,10 @@ mod tests {
 				"Channel force-closed".to_string(),
 			)
 			.unwrap();
-		check_closed_event!(
-			&nodes[0],
-			1,
-			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
-			false,
-			[nodes[2].node.get_our_node_id()],
-			1000000
-		);
+		let closure_reason =
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+		let node_c_id = nodes[2].node.get_our_node_id();
+		check_closed_event!(&nodes[0], 1, closure_reason, false, [node_c_id], 1000000);
 		check_closed_broadcast(&nodes[0], 1, true);
 		let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		assert_eq!(close_tx.len(), 1);

 		mine_transaction(&nodes[2], &close_tx[0]);
 		check_added_monitors(&nodes[2], 1);
 		check_closed_broadcast(&nodes[2], 1, true);
-		check_closed_event!(
-			&nodes[2],
-			1,
-			ClosureReason::CommitmentTxConfirmed,
-			false,
-			[nodes[0].node.get_our_node_id()],
-			1000000
-		);
+		let closure_reason = ClosureReason::CommitmentTxConfirmed;
+		let node_a_id = nodes[0].node.get_our_node_id();
+		check_closed_event!(&nodes[2], 1, closure_reason, false, [node_a_id], 1000000);

 		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
 		chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
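// A sketch of the poisoning behaviour `update_during_chainsync_poisons_channel`
// exercises, using only std: panicking while a lock guard is live poisons the
// lock, so every later lock() observes the failure instead of stale state.
use std::sync::Mutex;

let monitors = Mutex::new(0u32);
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
	let _guard = monitors.lock().unwrap();
	panic!("ChannelMonitor[Update] persistence failed unrecoverably");
}));
assert!(monitors.lock().is_err()); // poisoned: all future operations fail fast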