@@ -320,18 +320,16 @@ where
 		for channel_id in channel_ids.iter() {
 			let monitor_lock = self.monitors.read().unwrap();
 			if let Some(monitor_state) = monitor_lock.get(channel_id) {
-				if self
-					.update_monitor_with_chain_data(
-						header,
-						best_height,
-						txdata,
-						&process,
-						channel_id,
-						&monitor_state,
-						channel_count,
-					)
-					.is_err()
-				{
+				let update_res = self.update_monitor_with_chain_data(
+					header,
+					best_height,
+					txdata,
+					&process,
+					channel_id,
+					&monitor_state,
+					channel_count,
+				);
+				if update_res.is_err() {
 					// Take the monitors lock for writing so that we poison it and any future
 					// operations going forward fail immediately.
 					core::mem::drop(monitor_lock);
@@ -346,18 +344,16 @@ where
 		let monitor_states = self.monitors.write().unwrap();
 		for (channel_id, monitor_state) in monitor_states.iter() {
 			if !channel_ids.contains(channel_id) {
-				if self
-					.update_monitor_with_chain_data(
-						header,
-						best_height,
-						txdata,
-						&process,
-						channel_id,
-						&monitor_state,
-						channel_count,
-					)
-					.is_err()
-				{
+				let update_res = self.update_monitor_with_chain_data(
+					header,
+					best_height,
+					txdata,
+					&process,
+					channel_id,
+					&monitor_state,
+					channel_count,
+				);
+				if update_res.is_err() {
 					log_error!(self.logger, "{}", err_str);
 					panic!("{}", err_str);
 				}
@@ -564,10 +560,8 @@ where
 	/// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
 	/// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
 	pub fn list_pending_monitor_updates(&self) -> Vec<(ChannelId, Vec<u64>)> {
-		self.monitors
-			.read()
-			.unwrap()
-			.iter()
+		let monitors = self.monitors.read().unwrap();
+		monitors.iter()
 			.map(|(channel_id, holder)| {
 				(*channel_id, holder.pending_monitor_updates.lock().unwrap().clone())
 			})
@@ -1491,29 +1485,20 @@ mod tests {
 			"Channel force-closed".to_string(),
 		)
 		.unwrap();
-		check_closed_event!(
-			&nodes[0],
-			1,
-			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
-			false,
-			[nodes[2].node.get_our_node_id()],
-			1000000
-		);
+		let closure_reason =
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+		let node_c_id = nodes[2].node.get_our_node_id();
+		check_closed_event!(&nodes[0], 1, closure_reason, false, [node_c_id], 1000000);
 		check_closed_broadcast(&nodes[0], 1, true);
 		let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		assert_eq!(close_tx.len(), 1);
 
 		mine_transaction(&nodes[2], &close_tx[0]);
 		check_added_monitors(&nodes[2], 1);
 		check_closed_broadcast(&nodes[2], 1, true);
-		check_closed_event!(
-			&nodes[2],
-			1,
-			ClosureReason::CommitmentTxConfirmed,
-			false,
-			[nodes[0].node.get_our_node_id()],
-			1000000
-		);
+		let closure_reason = ClosureReason::CommitmentTxConfirmed;
+		let node_a_id = nodes[0].node.get_our_node_id();
+		check_closed_event!(&nodes[2], 1, closure_reason, false, [node_a_id], 1000000);
 
 		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
 		chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
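
For context, the hunks above repeatedly apply the same mechanical shape: bind a call's `Result` (or a macro argument) to a named local, then branch on or pass that local, instead of nesting the long call chain inside the condition or macro invocation. A minimal, self-contained sketch of that shape is below; `check_update` and `report_failure` are hypothetical stand-ins for illustration only, not rust-lightning APIs.

```rust
// Illustrative only: `check_update` and `report_failure` are hypothetical
// helpers, not functions from rust-lightning.
fn check_update(height: u32) -> Result<(), &'static str> {
	if height == 0 {
		Err("no chain data at height 0")
	} else {
		Ok(())
	}
}

fn report_failure(err_str: &str) {
	eprintln!("{}", err_str);
}

fn main() {
	// Before: the call chain lives inside the `if` condition.
	if check_update(0).is_err() {
		report_failure("update failed (inline form)");
	}

	// After: bind the Result to a local, then test it. Same behavior, but the
	// long call sits on its own lines and the condition stays short.
	let update_res = check_update(0);
	if update_res.is_err() {
		report_failure("update failed (bound form)");
	}
}
```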