diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index db3b0b1af..c6b3f01d7 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -6,6 +6,7 @@ dictionary Config {
 	Network network;
 	NetAddress? listening_address;
 	u32 default_cltv_expiry_delta;
+	LogLevel log_level;
 };
 
 interface Builder {
@@ -174,6 +175,15 @@ dictionary PeerDetails {
 	boolean is_connected;
 };
 
+enum LogLevel {
+	"Gossip",
+	"Trace",
+	"Debug",
+	"Info",
+	"Warn",
+	"Error",
+};
+
 [Custom]
 typedef string Txid;
 
diff --git a/src/event.rs b/src/event.rs
index 7e97fc49f..dff72c69a 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -526,7 +526,8 @@ where
 			}
 			LdkEvent::SpendableOutputs { outputs } => {
 				// TODO: We should eventually remember the outputs and supply them to the wallet's coin selection, once BDK allows us to do so.
-				let destination_address = self.wallet.get_new_address().unwrap();
+				let destination_address =
+					self.wallet.get_new_address().expect("Failed to get destination address");
 				let output_descriptors = &outputs.iter().collect::<Vec<_>>();
 				let tx_feerate =
 					self.wallet.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
diff --git a/src/io/utils.rs b/src/io/utils.rs
index 08cb0bbd9..0351b835d 100644
--- a/src/io/utils.rs
+++ b/src/io/utils.rs
@@ -92,7 +92,7 @@ where
 }
 
 /// Read a previously persisted [`NetworkGraph`] from the store.
-pub(crate) fn read_network_graph<K: KVStore + Send + Sync, L: Deref>(
+pub(crate) fn read_network_graph<K: KVStore + Send + Sync, L: Deref + Clone>(
 	kv_store: Arc<K>, logger: L,
 ) -> Result<NetworkGraph<L>, std::io::Error>
 where
@@ -100,13 +100,18 @@ where
 {
 	let mut reader =
 		kv_store.read(NETWORK_GRAPH_PERSISTENCE_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY)?;
-	NetworkGraph::read(&mut reader, logger).map_err(|_| {
+	NetworkGraph::read(&mut reader, logger.clone()).map_err(|e| {
+		log_error!(logger, "Failed to deserialize NetworkGraph: {}", e);
 		std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NetworkGraph")
 	})
 }
 
 /// Read a previously persisted [`Scorer`] from the store.
-pub(crate) fn read_scorer<K: KVStore + Send + Sync, G: Deref<Target = NetworkGraph<L>>, L: Deref>(
+pub(crate) fn read_scorer<
+	K: KVStore + Send + Sync,
+	G: Deref<Target = NetworkGraph<L>>,
+	L: Deref + Clone,
+>(
 	kv_store: Arc<K>, network_graph: G, logger: L,
 ) -> Result<ProbabilisticScorer<G, L>, std::io::Error>
 where
@@ -114,14 +119,15 @@ where
 {
 	let params = ProbabilisticScoringParameters::default();
 	let mut reader = kv_store.read(SCORER_PERSISTENCE_NAMESPACE, SCORER_PERSISTENCE_KEY)?;
-	let args = (params, network_graph, logger);
-	ProbabilisticScorer::read(&mut reader, args).map_err(|_| {
+	let args = (params, network_graph, logger.clone());
+	ProbabilisticScorer::read(&mut reader, args).map_err(|e| {
+		log_error!(logger, "Failed to deserialize scorer: {}", e);
 		std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Scorer")
 	})
 }
 
 /// Read previously persisted events from the store.
-pub(crate) fn read_event_queue<K: KVStore + Send + Sync, L: Deref>(
+pub(crate) fn read_event_queue<K: KVStore + Send + Sync, L: Deref + Clone>(
 	kv_store: Arc<K>, logger: L,
 ) -> Result<EventQueue<K, L>, std::io::Error>
 where
@@ -129,48 +135,61 @@ where
 {
 	let mut reader =
 		kv_store.read(EVENT_QUEUE_PERSISTENCE_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY)?;
-	EventQueue::read(&mut reader, (kv_store, logger)).map_err(|_| {
+	EventQueue::read(&mut reader, (kv_store, logger.clone())).map_err(|e| {
+		log_error!(logger, "Failed to deserialize event queue: {}", e);
 		std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize EventQueue")
 	})
 }
 
 /// Read previously persisted peer info from the store.
-pub(crate) fn read_peer_info<K: KVStore + Send + Sync, L: Deref>(
+pub(crate) fn read_peer_info<K: KVStore + Send + Sync, L: Deref + Clone>(
 	kv_store: Arc<K>, logger: L,
 ) -> Result<PeerStore<K, L>, std::io::Error>
 where
 	L::Target: Logger,
 {
 	let mut reader = kv_store.read(PEER_INFO_PERSISTENCE_NAMESPACE, PEER_INFO_PERSISTENCE_KEY)?;
-	PeerStore::read(&mut reader, (kv_store, logger)).map_err(|_| {
+	PeerStore::read(&mut reader, (kv_store, logger.clone())).map_err(|e| {
+		log_error!(logger, "Failed to deserialize peer store: {}", e);
 		std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize PeerStore")
 	})
 }
 
 /// Read previously persisted payments information from the store.
-pub(crate) fn read_payments<K: KVStore + Send + Sync>(
-	kv_store: Arc<K>,
-) -> Result<Vec<PaymentDetails>, std::io::Error> {
+pub(crate) fn read_payments<K: KVStore + Send + Sync, L: Deref>(
+	kv_store: Arc<K>, logger: L,
+) -> Result<Vec<PaymentDetails>, std::io::Error>
+where
+	L::Target: Logger,
+{
 	let mut res = Vec::new();
 
 	for stored_key in kv_store.list(PAYMENT_INFO_PERSISTENCE_NAMESPACE)? {
 		let payment = PaymentDetails::read(
 			&mut kv_store.read(PAYMENT_INFO_PERSISTENCE_NAMESPACE, &stored_key)?,
 		)
-		.map_err(|_| {
-			std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Payment")
+		.map_err(|e| {
+			log_error!(logger, "Failed to deserialize PaymentDetails: {}", e);
+			std::io::Error::new(
+				std::io::ErrorKind::InvalidData,
+				"Failed to deserialize PaymentDetails",
+			)
 		})?;
 		res.push(payment);
 	}
 	Ok(res)
 }
 
-pub(crate) fn read_latest_rgs_sync_timestamp<K: KVStore + Send + Sync>(
-	kv_store: Arc<K>,
-) -> Result<u32, std::io::Error> {
+pub(crate) fn read_latest_rgs_sync_timestamp<K: KVStore + Send + Sync, L: Deref>(
+	kv_store: Arc<K>, logger: L,
+) -> Result<u32, std::io::Error>
+where
+	L::Target: Logger,
+{
 	let mut reader =
 		kv_store.read(LATEST_RGS_SYNC_TIMESTAMP_NAMESPACE, LATEST_RGS_SYNC_TIMESTAMP_KEY)?;
-	u32::read(&mut reader).map_err(|_| {
+	u32::read(&mut reader).map_err(|e| {
+		log_error!(logger, "Failed to deserialize latest RGS sync timestamp: {}", e);
 		std::io::Error::new(
 			std::io::ErrorKind::InvalidData,
 			"Failed to deserialize latest RGS sync timestamp",
@@ -199,15 +218,23 @@ where
 	})
 }
 
-pub(crate) fn read_latest_node_ann_bcast_timestamp<K: KVStore + Send + Sync>(
-	kv_store: Arc<K>,
-) -> Result<u64, std::io::Error> {
+pub(crate) fn read_latest_node_ann_bcast_timestamp<K: KVStore + Send + Sync, L: Deref>(
+	kv_store: Arc<K>, logger: L,
+) -> Result<u64, std::io::Error>
+where
+	L::Target: Logger,
+{
 	let mut reader = kv_store
 		.read(LATEST_NODE_ANN_BCAST_TIMESTAMP_NAMESPACE, LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY)?;
-	u64::read(&mut reader).map_err(|_| {
+	u64::read(&mut reader).map_err(|e| {
+		log_error!(
+			logger,
+			"Failed to deserialize latest node announcement broadcast timestamp: {}",
+			e
+		);
 		std::io::Error::new(
 			std::io::ErrorKind::InvalidData,
-			"Failed to deserialize latest node announcment broadcast timestamp",
+			"Failed to deserialize latest node announcement broadcast timestamp",
 		)
 	})
 }
diff --git a/src/lib.rs b/src/lib.rs
index b8ae4c48c..25c7ab7ac 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -93,7 +93,6 @@ mod wallet;
 pub use bip39;
 pub use bitcoin;
 pub use lightning;
-use lightning::ln::msgs::RoutingMessageHandler;
 pub use lightning_invoice;
 
 pub use error::Error as NodeError;
@@ -126,11 +125,13 @@ use lightning::chain::{chainmonitor, BestBlock, Confirm, Watch};
 use lightning::ln::channelmanager::{
 	self, ChainParameters, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, Retry,
 };
+use lightning::ln::msgs::RoutingMessageHandler;
 use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler};
 use lightning::ln::{PaymentHash, PaymentPreimage};
 use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters};
 use lightning::util::config::{ChannelHandshakeConfig, ChannelHandshakeLimits, UserConfig};
+pub use lightning::util::logger::Level as LogLevel;
 use lightning::util::ser::ReadableArgs;
 
 use lightning_background_processor::process_events_async;
@@ -173,6 +174,7 @@ const DEFAULT_NETWORK: Network = Network::Bitcoin;
 const DEFAULT_LISTENING_ADDR: &str = "0.0.0.0:9735";
 const DEFAULT_CLTV_EXPIRY_DELTA: u32 = 144;
 const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api";
+const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Debug;
 
 // The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold
 // number of blocks after which BDK stops looking for scripts belonging to the wallet.
@@ -204,9 +206,10 @@ const WALLET_KEYS_SEED_LEN: usize = 64;
 /// | Parameter                   | Value            |
 /// |-----------------------------|------------------|
 /// | `storage_dir_path`          | /tmp/ldk_node/   |
-/// | `network`                   | Network::Bitcoin |
+/// | `network`                   | `Bitcoin`        |
 /// | `listening_address`         | 0.0.0.0:9735     |
 /// | `default_cltv_expiry_delta` | 144              |
+/// | `log_level`                 | `Debug`          |
 ///
 pub struct Config {
 	/// The path where the underlying LDK and BDK persist their data.
@@ -217,6 +220,10 @@ pub struct Config {
 	pub listening_address: Option<NetAddress>,
 	/// The default CLTV expiry delta to be used for payments.
 	pub default_cltv_expiry_delta: u32,
+	/// The level at which we log messages.
+	///
+	/// Any messages below this level will be excluded from the logs.
+	pub log_level: LogLevel,
 }
 
 impl Default for Config {
@@ -226,6 +233,7 @@ impl Default for Config {
 			network: DEFAULT_NETWORK,
 			listening_address: Some(DEFAULT_LISTENING_ADDR.parse().unwrap()),
 			default_cltv_expiry_delta: DEFAULT_CLTV_EXPIRY_DELTA,
+			log_level: DEFAULT_LOG_LEVEL,
 		}
 	}
 }
@@ -348,6 +356,12 @@ impl Builder {
 		config.listening_address = Some(listening_address);
 	}
 
+	/// Sets the level at which [`Node`] will log messages.
+	pub fn set_log_level(&self, level: LogLevel) {
+		let mut config = self.config.write().unwrap();
+		config.log_level = level;
+	}
+
 	/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
 	/// previously configured.
 	pub fn build(&self) -> Arc<Node<FilesystemStore>> {
@@ -371,7 +385,7 @@ impl Builder {
 
 		// Initialize the Logger
 		let log_file_path = format!("{}/ldk_node.log", config.storage_dir_path);
-		let logger = Arc::new(FilesystemLogger::new(log_file_path));
+		let logger = Arc::new(FilesystemLogger::new(log_file_path, config.log_level));
 
 		// Initialize the on-chain wallet and chain access
 		let seed_bytes = match &*self.entropy_source_config.read().unwrap() {
@@ -469,7 +483,6 @@ impl Builder {
 				if e.kind() == std::io::ErrorKind::NotFound {
 					Arc::new(NetworkGraph::new(config.network, Arc::clone(&logger)))
 				} else {
-					log_error!(logger, "Failed to read network graph: {}", e.to_string());
 					panic!("Failed to read network graph: {}", e.to_string());
 				}
 			}
@@ -490,7 +503,6 @@ impl Builder {
 						Arc::clone(&logger),
 					)))
 				} else {
-					log_error!(logger, "Failed to read scorer: {}", e.to_string());
 					panic!("Failed to read scorer: {}", e.to_string());
 				}
 			}
@@ -609,8 +621,11 @@ impl Builder {
 				p2p_source
 			}
 			GossipSourceConfig::RapidGossipSync(rgs_server) => {
-				let latest_sync_timestamp =
-					io::utils::read_latest_rgs_sync_timestamp(Arc::clone(&kv_store)).unwrap_or(0);
+				let latest_sync_timestamp = io::utils::read_latest_rgs_sync_timestamp(
+					Arc::clone(&kv_store),
+					Arc::clone(&logger),
+				)
+				.unwrap_or(0);
 				Arc::new(GossipSource::new_rgs(
 					rgs_server.clone(),
 					latest_sync_timestamp,
@@ -648,15 +663,17 @@ impl Builder {
 		));
 
 		// Init payment info storage
-		let payment_store = match io::utils::read_payments(Arc::clone(&kv_store)) {
-			Ok(payments) => {
-				Arc::new(PaymentStore::new(payments, Arc::clone(&kv_store), Arc::clone(&logger)))
-			}
-			Err(e) => {
-				log_error!(logger, "Failed to read payment information: {}", e.to_string());
-				panic!("Failed to read payment information: {}", e.to_string());
-			}
-		};
+		let payment_store =
+			match io::utils::read_payments(Arc::clone(&kv_store), Arc::clone(&logger)) {
+				Ok(payments) => Arc::new(PaymentStore::new(
+					payments,
+					Arc::clone(&kv_store),
+					Arc::clone(&logger),
+				)),
+				Err(e) => {
panic!("Failed to read payment information: {}", e.to_string()); + } + }; let event_queue = match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)) { @@ -665,7 +682,6 @@ impl Builder { if e.kind() == std::io::ErrorKind::NotFound { Arc::new(EventQueue::new(Arc::clone(&kv_store), Arc::clone(&logger))) } else { - log_error!(logger, "Failed to read event queue: {}", e.to_string()); panic!("Failed to read event queue: {}", e.to_string()); } } @@ -678,7 +694,6 @@ impl Builder { if e.kind() == std::io::ErrorKind::NotFound { Arc::new(PeerStore::new(Arc::clone(&kv_store), Arc::clone(&logger))) } else { - log_error!(logger, "Failed to read peer store: {}", e.to_string()); panic!("Failed to read peer store: {}", e.to_string()); } } @@ -747,6 +762,8 @@ impl Node { return Err(Error::AlreadyRunning); } + log_info!(self.logger, "Starting up LDK Node on network: {}", self.config.network); + let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); let event_handler = Arc::new(EventHandler::new( @@ -969,7 +986,7 @@ impl Node { return; } _ = interval.tick() => { - let skip_broadcast = match io::utils::read_latest_node_ann_bcast_timestamp(Arc::clone(&bcast_store)) { + let skip_broadcast = match io::utils::read_latest_node_ann_bcast_timestamp(Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) { Ok(latest_bcast_time_secs) => { // Skip if the time hasn't elapsed yet. let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL; @@ -1049,6 +1066,8 @@ impl Node { }); *runtime_lock = Some(runtime); + + log_info!(self.logger, "Startup complete."); Ok(()) } @@ -1057,6 +1076,9 @@ impl Node { /// After this returns most API methods will return [`Error::NotRunning`]. pub fn stop(&self) -> Result<(), Error> { let runtime = self.runtime.write().unwrap().take().ok_or(Error::NotRunning)?; + + log_info!(self.logger, "Shutting down LDK Node..."); + // Stop the runtime. 
match self.stop_sender.send(()) { Ok(_) => (), @@ -1074,6 +1096,8 @@ impl Node { self.peer_manager.disconnect_all_peers(); runtime.shutdown_timeout(Duration::from_secs(10)); + + log_info!(self.logger, "Shutdown complete."); Ok(()) } @@ -1099,7 +1123,9 @@ impl Node { /// /// **Note:** This **MUST** be called after each event has been handled. pub fn event_handled(&self) { - self.event_queue.event_handled().unwrap(); + self.event_queue + .event_handled() + .expect("Couldn't mark event handled due to persistence failure"); } /// Returns our own node id diff --git a/src/logger.rs b/src/logger.rs index 6ae4cc4bd..ae4e7a17f 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -1,8 +1,9 @@ pub(crate) use lightning::util::logger::Logger; -use lightning::util::logger::Record; -use lightning::util::ser::Writer; pub(crate) use lightning::{log_error, log_info, log_trace}; +use lightning::util::logger::{Level, Record}; +use lightning::util::ser::Writer; + use chrono::Utc; use std::fs; @@ -10,18 +11,22 @@ use std::path::Path; pub(crate) struct FilesystemLogger { file_path: String, + level: Level, } impl FilesystemLogger { - pub(crate) fn new(file_path: String) -> Self { + pub(crate) fn new(file_path: String, level: Level) -> Self { if let Some(parent_dir) = Path::new(&file_path).parent() { - fs::create_dir_all(parent_dir).unwrap(); + fs::create_dir_all(parent_dir).expect("Failed to create log parent directory"); } - Self { file_path } + Self { file_path, level } } } impl Logger for FilesystemLogger { fn log(&self, record: &Record) { + if record.level < self.level { + return; + } let raw_log = record.args.to_string(); let log = format!( "{} {:<5} [{}:{}] {}\n", @@ -35,8 +40,8 @@ impl Logger for FilesystemLogger { .create(true) .append(true) .open(self.file_path.clone()) - .unwrap() + .expect("Failed to open log file") .write_all(log.as_bytes()) - .unwrap(); + .expect("Failed to write to log file") } } diff --git a/src/test/utils.rs b/src/test/utils.rs index 4c65eb21c..6dc0ba2d2 
100644 --- a/src/test/utils.rs +++ b/src/test/utils.rs @@ -266,6 +266,8 @@ pub fn random_config() -> Config { let listening_address_str = format!("127.0.0.1:{}", rand_port); config.listening_address = Some(listening_address_str.parse().unwrap()); + config.log_level = Level::Trace; + config }