Skip to content

Commit 5c3fa55

Browse files
committed
Remove largely useless checks in chanmon_consistency fuzzer
When reloading nodes A or C, the chanmon_consistency fuzzer currently calls `get_and_clear_pending_msg_events` on the node, potentially causing additional `ChannelMonitor` or `ChannelManager` updates, just to check that no unexpected messages are generated. There's not much reason to do so: the fuzzer could always swap in a different command to call the same method, and the additional checking requires some awkward monitor-persistence introspection. Here we simplify the fuzzer by removing this logic.
1 parent ce94a5e commit 5c3fa55

File tree

1 file changed

+6
-17
lines changed

1 file changed

+6
-17
lines changed

fuzz/src/chanmon_consistency.rs

Lines changed: 6 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,6 @@ struct TestChainMonitor {
125125
// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
126126
// fully-serialized monitor state here, as well as the corresponding update_id.
127127
pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
128-
pub should_update_manager: atomic::AtomicBool,
129128
}
130129
impl TestChainMonitor {
131130
pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
@@ -135,7 +134,6 @@ impl TestChainMonitor {
135134
keys,
136135
persister,
137136
latest_monitors: Mutex::new(HashMap::new()),
138-
should_update_manager: atomic::AtomicBool::new(false),
139137
}
140138
}
141139
}
@@ -146,7 +144,6 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
146144
if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
147145
panic!("Already had monitor pre-watch_channel");
148146
}
149-
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
150147
self.chain_monitor.watch_channel(funding_txo, monitor)
151148
}
152149

@@ -162,7 +159,6 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
162159
let mut ser = VecWriter(Vec::new());
163160
deserialized_monitor.write(&mut ser).unwrap();
164161
map_entry.insert((update.update_id, ser.0));
165-
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
166162
self.chain_monitor.update_channel(funding_txo, update)
167163
}
168164

@@ -1101,11 +1097,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
11011097
if !chan_a_disconnected {
11021098
nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
11031099
chan_a_disconnected = true;
1104-
drain_msg_events_on_disconnect!(0);
1105-
}
1106-
if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
1107-
node_a_ser.0.clear();
1108-
nodes[0].write(&mut node_a_ser).unwrap();
1100+
push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
1101+
ab_events.clear();
1102+
ba_events.clear();
11091103
}
11101104
let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
11111105
nodes[0] = new_node_a;
@@ -1134,11 +1128,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
11341128
if !chan_b_disconnected {
11351129
nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
11361130
chan_b_disconnected = true;
1137-
drain_msg_events_on_disconnect!(2);
1138-
}
1139-
if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
1140-
node_c_ser.0.clear();
1141-
nodes[2].write(&mut node_c_ser).unwrap();
1131+
push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
1132+
bc_events.clear();
1133+
cb_events.clear();
11421134
}
11431135
let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
11441136
nodes[2] = new_node_c;
@@ -1306,13 +1298,10 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
13061298

13071299
node_a_ser.0.clear();
13081300
nodes[0].write(&mut node_a_ser).unwrap();
1309-
monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
13101301
node_b_ser.0.clear();
13111302
nodes[1].write(&mut node_b_ser).unwrap();
1312-
monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
13131303
node_c_ser.0.clear();
13141304
nodes[2].write(&mut node_c_ser).unwrap();
1315-
monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
13161305
}
13171306
}
13181307

0 commit comments

Comments
 (0)