From b5389f753541b8ab84f3eb5bb3202f609dd797f3 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 2 Feb 2026 11:02:19 -0800 Subject: [PATCH 1/6] Support async signing in chanmon_consistency This commit adds new opcodes to enable/disable signer operations one by one. Note that this only covers signer operations post-funding. --- fuzz/src/chanmon_consistency.rs | 104 +++++++++++++++++++++- lightning/src/util/test_channel_signer.rs | 24 ++--- 2 files changed, 114 insertions(+), 14 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 87d58da4832..c1153e3030e 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -37,7 +37,7 @@ use lightning::blinded_path::message::{BlindedMessagePath, MessageContext, Messa use lightning::blinded_path::payment::{BlindedPaymentPath, ReceiveTlvs}; use lightning::chain; use lightning::chain::chaininterface::{ - TransactionType, BroadcasterInterface, ConfirmationTarget, FeeEstimator, + BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType, }; use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent}; use lightning::chain::transaction::OutPoint; @@ -77,7 +77,7 @@ use lightning::util::errors::APIError; use lightning::util::hash_tables::*; use lightning::util::logger::Logger; use lightning::util::ser::{LengthReadable, ReadableArgs, Writeable, Writer}; -use lightning::util::test_channel_signer::{EnforcementState, TestChannelSigner}; +use lightning::util::test_channel_signer::{EnforcementState, SignerOp, TestChannelSigner}; use lightning_invoice::RawBolt11Invoice; @@ -514,6 +514,20 @@ impl KeyProvider { let cell = revoked_commitments.get(&commitment_seed).unwrap(); Arc::clone(cell) } + + fn disable_op_for_all_signers(&self, signer_op: SignerOp) { + let enforcement_states = self.enforcement_states.lock().unwrap(); + for (_, state) in enforcement_states.iter() { + state.lock().unwrap().disabled_signer_ops.insert(signer_op); + } + } + + fn enable_op_for_all_signers(&self, signer_op: SignerOp) { + let enforcement_states = self.enforcement_states.lock().unwrap(); + for (_, state) in enforcement_states.iter() { + state.lock().unwrap().disabled_signer_ops.remove(&signer_op); + } + } } // Returns a bool indicating whether the payment failed. @@ -2385,6 +2399,78 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { monitor_c = new_monitor_c; }, + // Since this fuzzer is only concerned with live-channel operations, we don't need to + // worry about any signer operations that come after a force close. 
+ 0xc0 => keys_manager_a.disable_op_for_all_signers(SignerOp::SignCounterpartyCommitment), + 0xc1 => keys_manager_b.disable_op_for_all_signers(SignerOp::SignCounterpartyCommitment), + 0xc2 => keys_manager_c.disable_op_for_all_signers(SignerOp::SignCounterpartyCommitment), + 0xc3 => { + keys_manager_a.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[0].signer_unblocked(None); + }, + 0xc4 => { + keys_manager_b.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[1].signer_unblocked(None); + }, + 0xc5 => { + keys_manager_c.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[2].signer_unblocked(None); + }, + + 0xc6 => keys_manager_a.disable_op_for_all_signers(SignerOp::GetPerCommitmentPoint), + 0xc7 => keys_manager_b.disable_op_for_all_signers(SignerOp::GetPerCommitmentPoint), + 0xc8 => keys_manager_c.disable_op_for_all_signers(SignerOp::GetPerCommitmentPoint), + 0xc9 => { + keys_manager_a.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[0].signer_unblocked(None); + }, + 0xca => { + keys_manager_b.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[1].signer_unblocked(None); + }, + 0xcb => { + keys_manager_c.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[2].signer_unblocked(None); + }, + + 0xcc => keys_manager_a.disable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret), + 0xcd => keys_manager_b.disable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret), + 0xce => keys_manager_c.disable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret), + 0xcf => { + keys_manager_a.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[0].signer_unblocked(None); + }, + 0xd0 => { + keys_manager_b.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[1].signer_unblocked(None); + }, + 0xd1 => { + keys_manager_c.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[2].signer_unblocked(None); + }, + + 0xd2 => { + keys_manager_a.disable_op_for_all_signers(SignerOp::ValidateCounterpartyRevocation); + }, + 0xd3 => { + keys_manager_b.disable_op_for_all_signers(SignerOp::ValidateCounterpartyRevocation); + }, + 0xd4 => { + keys_manager_c.disable_op_for_all_signers(SignerOp::ValidateCounterpartyRevocation); + }, + 0xd5 => { + keys_manager_a.enable_op_for_all_signers(SignerOp::ValidateCounterpartyRevocation); + nodes[0].signer_unblocked(None); + }, + 0xd6 => { + keys_manager_b.enable_op_for_all_signers(SignerOp::ValidateCounterpartyRevocation); + nodes[1].signer_unblocked(None); + }, + 0xd7 => { + keys_manager_c.enable_op_for_all_signers(SignerOp::ValidateCounterpartyRevocation); + nodes[2].signer_unblocked(None); + }, + 0xf0 => { for id in &chan_ab_ids { complete_monitor_update(&monitor_a, id, &complete_first); @@ -2485,6 +2571,20 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { peers_bc_disconnected = false; } + for op in [ + SignerOp::SignCounterpartyCommitment, + SignerOp::GetPerCommitmentPoint, + SignerOp::ReleaseCommitmentSecret, + SignerOp::ValidateCounterpartyRevocation, + ] { + keys_manager_a.enable_op_for_all_signers(op); + keys_manager_b.enable_op_for_all_signers(op); + keys_manager_c.enable_op_for_all_signers(op); + } + nodes[0].signer_unblocked(None); + nodes[1].signer_unblocked(None); + nodes[2].signer_unblocked(None); + macro_rules! 
process_all_events { () => { { let mut last_pass_no_updates = false; diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index 3bacd76a610..0b948c3002e 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -186,7 +186,7 @@ impl TestChannelSigner { self.get_enforcement_state().disabled_signer_ops.insert(signer_op); } - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] fn is_signer_available(&self, signer_op: SignerOp) -> bool { !self.get_enforcement_state().disabled_signer_ops.contains(&signer_op) } @@ -196,7 +196,7 @@ impl ChannelSigner for TestChannelSigner { fn get_per_commitment_point( &self, idx: u64, secp_ctx: &Secp256k1, ) -> Result { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::GetPerCommitmentPoint) { return Err(()); } @@ -204,7 +204,7 @@ impl ChannelSigner for TestChannelSigner { } fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()> { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::ReleaseCommitmentSecret) { return Err(()); } @@ -236,7 +236,7 @@ impl ChannelSigner for TestChannelSigner { } fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::ValidateCounterpartyRevocation) { return Err(()); } @@ -272,7 +272,7 @@ impl EcdsaChannelSigner for TestChannelSigner { ) -> Result<(Signature, Vec), ()> { self.verify_counterparty_commitment_tx(channel_parameters, commitment_tx, secp_ctx); - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::SignCounterpartyCommitment) { return Err(()); } @@ -317,7 +317,7 @@ impl EcdsaChannelSigner for TestChannelSigner { &self, channel_parameters: &ChannelTransactionParameters, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1, ) -> Result { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::SignHolderCommitment) { return Err(()); } @@ -354,7 +354,7 @@ impl EcdsaChannelSigner for TestChannelSigner { input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1, ) -> Result { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::SignJusticeRevokedOutput) { return Err(()); } @@ -375,7 +375,7 @@ impl EcdsaChannelSigner for TestChannelSigner { input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1, ) -> Result { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::SignJusticeRevokedHtlc) { return Err(()); } @@ -396,7 +396,7 @@ impl EcdsaChannelSigner for TestChannelSigner { &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1, ) -> Result { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::SignHolderHtlcTransaction) { return Err(()); } @@ -462,7 +462,7 @@ impl EcdsaChannelSigner for TestChannelSigner { input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1, ) -> Result { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::SignCounterpartyHtlcTransaction) { return Err(()); } @@ -483,7 +483,7 @@ impl EcdsaChannelSigner for 
TestChannelSigner { &self, channel_parameters: &ChannelTransactionParameters, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1, ) -> Result { - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::SignClosingTransaction) { return Err(()); } @@ -504,7 +504,7 @@ impl EcdsaChannelSigner for TestChannelSigner { anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1 ); - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] if !self.is_signer_available(SignerOp::SignHolderAnchorInput) { return Err(()); } From b01832e0b265b6dbcd032d0c138c08e9987575a9 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 2 Feb 2026 11:03:55 -0800 Subject: [PATCH 2/6] Drive splices to completion in chanmon_consistency There were two bugs preventing splices from negotiating up to the `tx_signatures` exchange: 1. A `serial_id` collision because its generation used the first 4 bytes of `get_secure_random_bytes`. 2. Opcodes 0xa2 and 0xa3 used the wrong vout for the input to splice in. This commit fixes both, while also adding support for locking splices. This required confirming transactions, which this target previously didn't consider. --- fuzz/src/chanmon_consistency.rs | 203 +++++++++++++++++++++++++------- 1 file changed, 159 insertions(+), 44 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index c1153e3030e..2035c2e7672 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -27,7 +27,8 @@ use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; use bitcoin::transaction::{Transaction, TxOut}; -use bitcoin::hash_types::BlockHash; +use bitcoin::block::Header; +use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash as TraitImport; @@ -93,6 +94,7 @@ use lightning::util::dyn_signer::DynSigner; use std::cell::RefCell; use std::cmp; +use std::collections::HashSet; use std::mem; use std::sync::atomic; use std::sync::{Arc, Mutex}; @@ -168,6 +170,46 @@ impl BroadcasterInterface for TestBroadcaster { } } +struct ChainState { + blocks: Vec<(Header, Vec)>, + confirmed_txids: HashSet, +} + +impl ChainState { + fn new() -> Self { + let genesis_hash = genesis_block(Network::Bitcoin).block_hash(); + let genesis_header = create_dummy_header(genesis_hash, 42); + Self { blocks: vec![(genesis_header, Vec::new())], confirmed_txids: HashSet::new() } + } + + fn tip_height(&self) -> u32 { + (self.blocks.len() - 1) as u32 + } + + fn confirm_tx(&mut self, tx: Transaction) -> bool { + let txid = tx.compute_txid(); + if self.confirmed_txids.contains(&txid) { + return false; + } + self.confirmed_txids.insert(txid); + + let prev_hash = self.blocks.last().unwrap().0.block_hash(); + let header = create_dummy_header(prev_hash, 42); + self.blocks.push((header, vec![tx])); + + for _ in 0..5 { + let prev_hash = self.blocks.last().unwrap().0.block_hash(); + let header = create_dummy_header(prev_hash, 42); + self.blocks.push((header, Vec::new())); + } + true + } + + fn block_at(&self, height: u32) -> &(Header, Vec) { + &self.blocks[height as usize] + } +} + pub struct VecWriter(pub Vec); impl Writer for VecWriter { fn write_all(&mut self, buf: &[u8]) -> Result<(), ::lightning::io::Error> { @@ -380,7 +422,8 @@ impl EntropySource for KeyProvider { fn get_secure_random_bytes(&self) -> [u8; 32] { let id = self.rand_bytes_id.fetch_add(1, 
atomic::Ordering::Relaxed); #[rustfmt::skip] - let mut res = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_secret[31]]; + let mut res = [self.node_secret[31], 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_secret[31]]; + res[2..6].copy_from_slice(&id.to_le_bytes()); res[30 - 4..30].copy_from_slice(&id.to_le_bytes()); res } @@ -794,7 +837,9 @@ fn send_mpp_hop_payment( #[inline] pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let out = SearchingOutput::new(underlying_out); - let broadcast = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let router = FuzzRouter {}; // Read initial monitor styles from fuzz input (1 byte: 2 bits per node) @@ -817,8 +862,13 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }), ]; + let mut chain_state = ChainState::new(); + let mut node_height_a: u32 = 0; + let mut node_height_b: u32 = 0; + let mut node_height_c: u32 = 0; + macro_rules! make_node { - ($node_id: expr, $fee_estimator: expr) => {{ + ($node_id: expr, $fee_estimator: expr, $broadcaster: expr) => {{ let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); let node_secret = SecretKey::from_slice(&[ @@ -832,7 +882,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { enforcement_states: Mutex::new(new_hash_map()), }); let monitor = Arc::new(TestChainMonitor::new( - broadcast.clone(), + $broadcaster.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister { @@ -855,7 +905,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { ChannelManager::new( $fee_estimator.clone(), monitor.clone(), - broadcast.clone(), + $broadcaster.clone(), &router, &router, Arc::clone(&logger), @@ -877,12 +927,13 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { old_monitors: &TestChainMonitor, mut use_old_mons, keys, - fee_estimator| { + fee_estimator, + broadcaster: Arc| { let keys_manager = Arc::clone(keys); let logger: Arc = Arc::new(test_logger::TestLogger::new(node_id.to_string(), out.clone())); let chain_monitor = Arc::new(TestChainMonitor::new( - broadcast.clone(), + broadcaster.clone(), logger.clone(), Arc::clone(fee_estimator), Arc::new(TestPersister { @@ -944,7 +995,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { signer_provider: keys_manager, fee_estimator: Arc::clone(fee_estimator), chain_monitor: chain_monitor.clone(), - tx_broadcaster: broadcast.clone(), + tx_broadcaster: broadcaster, router: &router, message_router: &router, logger, @@ -965,7 +1016,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { res }; - let mut channel_txn = Vec::new(); macro_rules! complete_all_pending_monitor_updates { ($monitor: expr) => {{ for (channel_id, state) in $monitor.latest_monitors.lock().unwrap().iter_mut() { @@ -1069,7 +1119,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { tx.clone(), ) .unwrap(); - channel_txn.push(tx); + chain_state.confirm_tx(tx); } else { panic!("Wrong event type"); } @@ -1127,20 +1177,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }}; } - macro_rules! 
confirm_txn { - ($node: expr) => {{ - let chain_hash = genesis_block(Network::Bitcoin).block_hash(); - let mut header = create_dummy_header(chain_hash, 42); - let txdata: Vec<_> = - channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); - $node.transactions_confirmed(&header, &txdata, 1); - for _ in 2..100 { - header = create_dummy_header(header.block_hash(), 42); - } - $node.best_block_updated(&header, 99); - }}; - } - macro_rules! lock_fundings { ($nodes: expr) => {{ let mut node_events = Vec::new(); @@ -1206,9 +1242,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest // forwarding. - let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a); - let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b); - let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c); + let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a, broadcast_a); + let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b, broadcast_b); + let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c, broadcast_c); let mut nodes = [node_a, node_b, node_c]; @@ -1235,11 +1271,35 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. - broadcast.txn_broadcasted.borrow_mut().clear(); + broadcast_a.txn_broadcasted.borrow_mut().clear(); + broadcast_b.txn_broadcasted.borrow_mut().clear(); + broadcast_c.txn_broadcasted.borrow_mut().clear(); + + let sync_with_chain_state = |chain_state: &ChainState, + node: &ChannelManager<_, _, _, _, _, _, _, _, _>, + node_height: &mut u32, + num_blocks: Option| { + let target_height = if let Some(num_blocks) = num_blocks { + std::cmp::min(*node_height + num_blocks, chain_state.tip_height()) + } else { + chain_state.tip_height() + }; - for node in nodes.iter() { - confirm_txn!(node); - } + while *node_height < target_height { + *node_height += 1; + let (header, txn) = chain_state.block_at(*node_height); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + node.transactions_confirmed(header, &txdata, *node_height); + } + node.best_block_updated(header, *node_height); + } + }; + + // Sync all nodes to tip to lock the funding. + sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None); + sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); + sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); lock_fundings!(nodes); @@ -1289,9 +1349,11 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { assert_eq!(nodes[1].list_channels().len(), 6); assert_eq!(nodes[2].list_channels().len(), 3); - // At no point should we have broadcasted any transactions after the initial channel - // opens. - assert!(broadcast.txn_broadcasted.borrow().is_empty()); + // All broadcasters should be empty (all broadcast transactions should be handled + // explicitly). 
+ assert!(broadcast_a.txn_broadcasted.borrow().is_empty()); + assert!(broadcast_b.txn_broadcasted.borrow().is_empty()); + assert!(broadcast_c.txn_broadcasted.borrow().is_empty()); return; }}; @@ -1362,6 +1424,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, + MessageSendEvent::SendTxSignatures { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, MessageSendEvent::SendChannelReady { .. } => continue, MessageSendEvent::SendAnnouncementSignatures { .. } => continue, MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { @@ -1546,6 +1612,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } }, + MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_signatures from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_signatures(nodes[$node].get_our_node_id(), msg); + } + } + }, MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { @@ -1747,7 +1821,18 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { ) .unwrap(); }, - events::Event::SplicePending { .. } => {}, + events::Event::SplicePending { new_funding_txo, .. } => { + let broadcaster = match $node { + 0 => &broadcast_a, + 1 => &broadcast_b, + _ => &broadcast_c, + }; + let mut txs = broadcaster.txn_broadcasted.borrow_mut(); + assert!(txs.len() >= 1); + let splice_tx = txs.remove(0); + assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); + chain_state.confirm_tx(splice_tx); + }, events::Event::SpliceFailed { .. } => {}, _ => { @@ -2182,7 +2267,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }, 0xa2 => { - let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); let contribution = SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); @@ -2201,7 +2286,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }, 0xa3 => { - let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 2).unwrap(); let contribution = SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); @@ -2340,6 +2425,15 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }, + // Sync node by 1 block to cover confirmation of a transaction. + 0xa8 => sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, Some(1)), + 0xa9 => sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, Some(1)), + 0xaa => sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, Some(1)), + // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 
+ 0xab => sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None), + 0xac => sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None), + 0xad => sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None), + 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. @@ -2353,8 +2447,15 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { ab_events.clear(); ba_events.clear(); } - let (new_node_a, new_monitor_a) = - reload_node(&node_a_ser, 0, &monitor_a, v, &keys_manager_a, &fee_est_a); + let (new_node_a, new_monitor_a) = reload_node( + &node_a_ser, + 0, + &monitor_a, + v, + &keys_manager_a, + &fee_est_a, + broadcast_a.clone(), + ); nodes[0] = new_node_a; monitor_a = new_monitor_a; }, @@ -2375,8 +2476,15 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { bc_events.clear(); cb_events.clear(); } - let (new_node_b, new_monitor_b) = - reload_node(&node_b_ser, 1, &monitor_b, v, &keys_manager_b, &fee_est_b); + let (new_node_b, new_monitor_b) = reload_node( + &node_b_ser, + 1, + &monitor_b, + v, + &keys_manager_b, + &fee_est_b, + broadcast_b.clone(), + ); nodes[1] = new_node_b; monitor_b = new_monitor_b; }, @@ -2393,8 +2501,15 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { bc_events.clear(); cb_events.clear(); } - let (new_node_c, new_monitor_c) = - reload_node(&node_c_ser, 2, &monitor_c, v, &keys_manager_c, &fee_est_c); + let (new_node_c, new_monitor_c) = reload_node( + &node_c_ser, + 2, + &monitor_c, + v, + &keys_manager_c, + &fee_est_c, + broadcast_c.clone(), + ); nodes[2] = new_node_c; monitor_c = new_monitor_c; }, From bb62f0cea18c163f0354b962f8b3faa5bcdc71ad Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 5 Feb 2026 09:26:08 -0800 Subject: [PATCH 3/6] Use channel_id over short_channel_id for payments in chanmon_consistency The `short_channel_id` is no longer guaranteed to be stable with splicing now that the fuzzer can actually lock splices. 
--- fuzz/src/chanmon_consistency.rs | 297 ++++++++++++++++++-------------- 1 file changed, 169 insertions(+), 128 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 2035c2e7672..cd8877512d1 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -614,15 +614,21 @@ fn get_payment_secret_hash(dest: &ChanMan, payment_ctr: &mut u64) -> (PaymentSec #[inline] fn send_payment( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_secret: PaymentSecret, - payment_hash: PaymentHash, payment_id: PaymentId, + source: &ChanMan, dest: &ChanMan, dest_chan_id: ChannelId, amt: u64, + payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let (min_value_sendable, max_value_sendable) = source + let (min_value_sendable, max_value_sendable, dest_scid) = source .list_usable_channels() .iter() - .find(|chan| chan.short_channel_id == Some(dest_chan_id)) - .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) - .unwrap_or((0, 0)); + .find(|chan| chan.channel_id == dest_chan_id) + .map(|chan| { + ( + chan.next_outbound_htlc_minimum_msat, + chan.next_outbound_htlc_limit_msat, + chan.short_channel_id.unwrap_or(0), + ) + }) + .unwrap_or((0, 0, 0)); let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), amt, @@ -632,7 +638,7 @@ fn send_payment( hops: vec![RouteHop { pubkey: dest.get_our_node_id(), node_features: dest.node_features(), - short_channel_id: dest_chan_id, + short_channel_id: dest_scid, channel_features: dest.channel_features(), fee_msat: amt, cltv_expiry_delta: 200, @@ -659,15 +665,28 @@ fn send_payment( #[inline] fn send_hop_payment( - source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64, - payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, + source: &ChanMan, middle: &ChanMan, middle_chan_id: ChannelId, dest: &ChanMan, + dest_chan_id: ChannelId, amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, + payment_id: PaymentId, ) -> bool { - let (min_value_sendable, max_value_sendable) = source + let (min_value_sendable, max_value_sendable, middle_scid) = source .list_usable_channels() .iter() - .find(|chan| chan.short_channel_id == Some(middle_scid)) - .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) - .unwrap_or((0, 0)); + .find(|chan| chan.channel_id == middle_chan_id) + .map(|chan| { + ( + chan.next_outbound_htlc_minimum_msat, + chan.next_outbound_htlc_limit_msat, + chan.short_channel_id.unwrap_or(0), + ) + }) + .unwrap_or((0, 0, 0)); + let dest_scid = dest + .list_channels() + .iter() + .find(|chan| chan.channel_id == dest_chan_id) + .and_then(|chan| chan.short_channel_id) + .unwrap_or(0); let first_hop_fee = 50_000; let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), @@ -718,10 +737,10 @@ fn send_hop_payment( /// Send an MPP payment directly from source to dest using multiple channels. 
#[inline] fn send_mpp_payment( - source: &ChanMan, dest: &ChanMan, dest_scids: &[u64], amt: u64, payment_secret: PaymentSecret, - payment_hash: PaymentHash, payment_id: PaymentId, + source: &ChanMan, dest: &ChanMan, dest_chan_ids: &[ChannelId], amt: u64, + payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let num_paths = dest_scids.len(); + let num_paths = dest_chan_ids.len(); if num_paths == 0 { return false; } @@ -729,7 +748,16 @@ fn send_mpp_payment( let amt_per_path = amt / num_paths as u64; let mut paths = Vec::with_capacity(num_paths); - for (i, &dest_scid) in dest_scids.iter().enumerate() { + let dest_chans = dest.list_channels(); + let dest_scids = dest_chan_ids.iter().map(|chan_id| { + dest_chans + .iter() + .find(|chan| chan.channel_id == *chan_id) + .and_then(|chan| chan.short_channel_id) + .unwrap() + }); + + for (i, dest_scid) in dest_scids.enumerate() { let path_amt = if i == num_paths - 1 { amt - amt_per_path * (num_paths as u64 - 1) } else { @@ -767,11 +795,12 @@ fn send_mpp_payment( /// Supports multiple channels on either or both hops. #[inline] fn send_mpp_hop_payment( - source: &ChanMan, middle: &ChanMan, middle_scids: &[u64], dest: &ChanMan, dest_scids: &[u64], - amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, + source: &ChanMan, middle: &ChanMan, middle_chan_ids: &[ChannelId], dest: &ChanMan, + dest_chan_ids: &[ChannelId], amt: u64, payment_secret: PaymentSecret, + payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { // Create paths by pairing middle_scids with dest_scids - let num_paths = middle_scids.len().max(dest_scids.len()); + let num_paths = middle_chan_ids.len().max(dest_chan_ids.len()); if num_paths == 0 { return false; } @@ -781,6 +810,30 @@ fn send_mpp_hop_payment( let fee_per_path = first_hop_fee / num_paths as u64; let mut paths = Vec::with_capacity(num_paths); + let middle_chans = middle.list_channels(); + let middle_scids: Vec<_> = middle_chan_ids + .iter() + .map(|chan_id| { + middle_chans + .iter() + .find(|chan| chan.channel_id == *chan_id) + .and_then(|chan| chan.short_channel_id) + .unwrap() + }) + .collect(); + + let dest_chans = dest.list_channels(); + let dest_scids: Vec<_> = dest_chan_ids + .iter() + .map(|chan_id| { + dest_chans + .iter() + .find(|chan| chan.channel_id == *chan_id) + .and_then(|chan| chan.short_channel_id) + .unwrap() + }) + .collect(); + for i in 0..num_paths { let middle_scid = middle_scids[i % middle_scids.len()]; let dest_scid = dest_scids[i % dest_scids.len()]; @@ -1258,16 +1311,12 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // Fuzz mode uses XOR-based hashing (all bytes XOR to one byte), and // versions 0-5 cause collisions between A-B and B-C channel pairs // (e.g., A-B with Version(1) collides with B-C with Version(3)). 
- let chan_ab_ids = [ - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 1), - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 2), - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 3), - ]; - let chan_bc_ids = [ - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 4), - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 5), - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 6), - ]; + let _ = make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 1); + let _ = make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 2); + let _ = make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 3); + let _ = make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 4); + let _ = make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 5); + let _ = make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 6); // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. @@ -1303,29 +1352,19 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { lock_fundings!(nodes); - // Get SCIDs for all A-B channels (from node A's perspective) - let node_a_chans: Vec<_> = nodes[0].list_usable_channels(); - let chan_ab_scids: [u64; 3] = [ - node_a_chans[0].short_channel_id.unwrap(), - node_a_chans[1].short_channel_id.unwrap(), - node_a_chans[2].short_channel_id.unwrap(), - ]; - let chan_ab_chan_ids: [ChannelId; 3] = - [node_a_chans[0].channel_id, node_a_chans[1].channel_id, node_a_chans[2].channel_id]; - // Get SCIDs for all B-C channels (from node C's perspective) - let node_c_chans: Vec<_> = nodes[2].list_usable_channels(); - let chan_bc_scids: [u64; 3] = [ - node_c_chans[0].short_channel_id.unwrap(), - node_c_chans[1].short_channel_id.unwrap(), - node_c_chans[2].short_channel_id.unwrap(), - ]; - let chan_bc_chan_ids: [ChannelId; 3] = - [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id]; + // Get channel IDs for all A-B channels (from node A's perspective) + let chan_ab_ids = { + let node_a_chans = nodes[0].list_usable_channels(); + [node_a_chans[0].channel_id, node_a_chans[1].channel_id, node_a_chans[2].channel_id] + }; + // Get channel IDs for all B-C channels (from node C's perspective) + let chan_bc_ids = { + let node_c_chans = nodes[2].list_usable_channels(); + [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] + }; // Keep old names for backward compatibility in existing code - let chan_a = chan_ab_scids[0]; - let chan_a_id = chan_ab_chan_ids[0]; - let chan_b = chan_bc_scids[0]; - let chan_b_id = chan_bc_chan_ids[0]; + let chan_a_id = chan_ab_ids[0]; + let chan_b_id = chan_bc_ids[0]; let mut p_ctr: u64 = 0; @@ -1913,9 +1952,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let send_hop_noret = |source_idx: usize, middle_idx: usize, - middle_scid: u64, + middle_chan_id: ChannelId, dest_idx: usize, - dest_scid: u64, + dest_chan_id: ChannelId, amt: u64, payment_ctr: &mut u64| { let source = &nodes[source_idx]; @@ -1927,9 +1966,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let succeeded = send_hop_payment( source, middle, - middle_scid, + middle_chan_id, dest, - dest_scid, + dest_chan_id, amt, secret, hash, @@ -1943,7 +1982,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: 
bool) { // Direct MPP payment (no hop) let send_mpp_direct = |source_idx: usize, dest_idx: usize, - dest_scids: &[u64], + dest_chan_ids: &[ChannelId], amt: u64, payment_ctr: &mut u64| { let source = &nodes[source_idx]; @@ -1951,7 +1990,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); let mut id = PaymentId([0; 32]); id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_mpp_payment(source, dest, dest_scids, amt, secret, hash, id); + let succeeded = send_mpp_payment(source, dest, dest_chan_ids, amt, secret, hash, id); if succeeded { pending_payments.borrow_mut()[source_idx].push(id); } @@ -1960,9 +1999,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // MPP payment via hop - splits payment across multiple channels on either or both hops let send_mpp_hop = |source_idx: usize, middle_idx: usize, - middle_scids: &[u64], + middle_chan_ids: &[ChannelId], dest_idx: usize, - dest_scids: &[u64], + dest_chan_ids: &[ChannelId], amt: u64, payment_ctr: &mut u64| { let source = &nodes[source_idx]; @@ -1974,9 +2013,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let succeeded = send_mpp_hop_payment( source, middle, - middle_scids, + middle_chan_ids, dest, - dest_scids, + dest_chan_ids, amt, secret, hash, @@ -2115,73 +2154,75 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0x27 => process_ev_noret!(2, false), // 1/10th the channel size: - 0x30 => send_noret(0, 1, chan_a, 10_000_000, &mut p_ctr), - 0x31 => send_noret(1, 0, chan_a, 10_000_000, &mut p_ctr), - 0x32 => send_noret(1, 2, chan_b, 10_000_000, &mut p_ctr), - 0x33 => send_noret(2, 1, chan_b, 10_000_000, &mut p_ctr), - 0x34 => send_hop_noret(0, 1, chan_a, 2, chan_b, 10_000_000, &mut p_ctr), - 0x35 => send_hop_noret(2, 1, chan_b, 0, chan_a, 10_000_000, &mut p_ctr), - - 0x38 => send_noret(0, 1, chan_a, 1_000_000, &mut p_ctr), - 0x39 => send_noret(1, 0, chan_a, 1_000_000, &mut p_ctr), - 0x3a => send_noret(1, 2, chan_b, 1_000_000, &mut p_ctr), - 0x3b => send_noret(2, 1, chan_b, 1_000_000, &mut p_ctr), - 0x3c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1_000_000, &mut p_ctr), - 0x3d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1_000_000, &mut p_ctr), - - 0x40 => send_noret(0, 1, chan_a, 100_000, &mut p_ctr), - 0x41 => send_noret(1, 0, chan_a, 100_000, &mut p_ctr), - 0x42 => send_noret(1, 2, chan_b, 100_000, &mut p_ctr), - 0x43 => send_noret(2, 1, chan_b, 100_000, &mut p_ctr), - 0x44 => send_hop_noret(0, 1, chan_a, 2, chan_b, 100_000, &mut p_ctr), - 0x45 => send_hop_noret(2, 1, chan_b, 0, chan_a, 100_000, &mut p_ctr), - - 0x48 => send_noret(0, 1, chan_a, 10_000, &mut p_ctr), - 0x49 => send_noret(1, 0, chan_a, 10_000, &mut p_ctr), - 0x4a => send_noret(1, 2, chan_b, 10_000, &mut p_ctr), - 0x4b => send_noret(2, 1, chan_b, 10_000, &mut p_ctr), - 0x4c => send_hop_noret(0, 1, chan_a, 2, chan_b, 10_000, &mut p_ctr), - 0x4d => send_hop_noret(2, 1, chan_b, 0, chan_a, 10_000, &mut p_ctr), - - 0x50 => send_noret(0, 1, chan_a, 1_000, &mut p_ctr), - 0x51 => send_noret(1, 0, chan_a, 1_000, &mut p_ctr), - 0x52 => send_noret(1, 2, chan_b, 1_000, &mut p_ctr), - 0x53 => send_noret(2, 1, chan_b, 1_000, &mut p_ctr), - 0x54 => send_hop_noret(0, 1, chan_a, 2, chan_b, 1_000, &mut p_ctr), - 0x55 => send_hop_noret(2, 1, chan_b, 0, chan_a, 1_000, &mut p_ctr), - - 0x58 => send_noret(0, 1, chan_a, 100, &mut p_ctr), - 0x59 => send_noret(1, 0, chan_a, 100, &mut p_ctr), - 0x5a => send_noret(1, 2, chan_b, 100, &mut 
p_ctr), - 0x5b => send_noret(2, 1, chan_b, 100, &mut p_ctr), - 0x5c => send_hop_noret(0, 1, chan_a, 2, chan_b, 100, &mut p_ctr), - 0x5d => send_hop_noret(2, 1, chan_b, 0, chan_a, 100, &mut p_ctr), - - 0x60 => send_noret(0, 1, chan_a, 10, &mut p_ctr), - 0x61 => send_noret(1, 0, chan_a, 10, &mut p_ctr), - 0x62 => send_noret(1, 2, chan_b, 10, &mut p_ctr), - 0x63 => send_noret(2, 1, chan_b, 10, &mut p_ctr), - 0x64 => send_hop_noret(0, 1, chan_a, 2, chan_b, 10, &mut p_ctr), - 0x65 => send_hop_noret(2, 1, chan_b, 0, chan_a, 10, &mut p_ctr), - - 0x68 => send_noret(0, 1, chan_a, 1, &mut p_ctr), - 0x69 => send_noret(1, 0, chan_a, 1, &mut p_ctr), - 0x6a => send_noret(1, 2, chan_b, 1, &mut p_ctr), - 0x6b => send_noret(2, 1, chan_b, 1, &mut p_ctr), - 0x6c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1, &mut p_ctr), - 0x6d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1, &mut p_ctr), + 0x30 => send_noret(0, 1, chan_a_id, 10_000_000, &mut p_ctr), + 0x31 => send_noret(1, 0, chan_a_id, 10_000_000, &mut p_ctr), + 0x32 => send_noret(1, 2, chan_b_id, 10_000_000, &mut p_ctr), + 0x33 => send_noret(2, 1, chan_b_id, 10_000_000, &mut p_ctr), + 0x34 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10_000_000, &mut p_ctr), + 0x35 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10_000_000, &mut p_ctr), + + 0x38 => send_noret(0, 1, chan_a_id, 1_000_000, &mut p_ctr), + 0x39 => send_noret(1, 0, chan_a_id, 1_000_000, &mut p_ctr), + 0x3a => send_noret(1, 2, chan_b_id, 1_000_000, &mut p_ctr), + 0x3b => send_noret(2, 1, chan_b_id, 1_000_000, &mut p_ctr), + 0x3c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1_000_000, &mut p_ctr), + 0x3d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1_000_000, &mut p_ctr), + + 0x40 => send_noret(0, 1, chan_a_id, 100_000, &mut p_ctr), + 0x41 => send_noret(1, 0, chan_a_id, 100_000, &mut p_ctr), + 0x42 => send_noret(1, 2, chan_b_id, 100_000, &mut p_ctr), + 0x43 => send_noret(2, 1, chan_b_id, 100_000, &mut p_ctr), + 0x44 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 100_000, &mut p_ctr), + 0x45 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 100_000, &mut p_ctr), + + 0x48 => send_noret(0, 1, chan_a_id, 10_000, &mut p_ctr), + 0x49 => send_noret(1, 0, chan_a_id, 10_000, &mut p_ctr), + 0x4a => send_noret(1, 2, chan_b_id, 10_000, &mut p_ctr), + 0x4b => send_noret(2, 1, chan_b_id, 10_000, &mut p_ctr), + 0x4c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10_000, &mut p_ctr), + 0x4d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10_000, &mut p_ctr), + + 0x50 => send_noret(0, 1, chan_a_id, 1_000, &mut p_ctr), + 0x51 => send_noret(1, 0, chan_a_id, 1_000, &mut p_ctr), + 0x52 => send_noret(1, 2, chan_b_id, 1_000, &mut p_ctr), + 0x53 => send_noret(2, 1, chan_b_id, 1_000, &mut p_ctr), + 0x54 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1_000, &mut p_ctr), + 0x55 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1_000, &mut p_ctr), + + 0x58 => send_noret(0, 1, chan_a_id, 100, &mut p_ctr), + 0x59 => send_noret(1, 0, chan_a_id, 100, &mut p_ctr), + 0x5a => send_noret(1, 2, chan_b_id, 100, &mut p_ctr), + 0x5b => send_noret(2, 1, chan_b_id, 100, &mut p_ctr), + 0x5c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 100, &mut p_ctr), + 0x5d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 100, &mut p_ctr), + + 0x60 => send_noret(0, 1, chan_a_id, 10, &mut p_ctr), + 0x61 => send_noret(1, 0, chan_a_id, 10, &mut p_ctr), + 0x62 => send_noret(1, 2, chan_b_id, 10, &mut p_ctr), + 0x63 => send_noret(2, 1, chan_b_id, 10, &mut p_ctr), + 0x64 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10, &mut p_ctr), + 0x65 
=> send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10, &mut p_ctr), + + 0x68 => send_noret(0, 1, chan_a_id, 1, &mut p_ctr), + 0x69 => send_noret(1, 0, chan_a_id, 1, &mut p_ctr), + 0x6a => send_noret(1, 2, chan_b_id, 1, &mut p_ctr), + 0x6b => send_noret(2, 1, chan_b_id, 1, &mut p_ctr), + 0x6c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1, &mut p_ctr), + 0x6d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1, &mut p_ctr), // MPP payments // 0x70: direct MPP from 0 to 1 (multi A-B channels) - 0x70 => send_mpp_direct(0, 1, &chan_ab_scids, 1_000_000, &mut p_ctr), + 0x70 => send_mpp_direct(0, 1, &chan_ab_ids, 1_000_000, &mut p_ctr), // 0x71: MPP 0->1->2, multi channels on first hop (A-B) - 0x71 => send_mpp_hop(0, 1, &chan_ab_scids, 2, &[chan_b], 1_000_000, &mut p_ctr), + 0x71 => send_mpp_hop(0, 1, &chan_ab_ids, 2, &[chan_b_id], 1_000_000, &mut p_ctr), // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) - 0x72 => send_mpp_hop(0, 1, &chan_ab_scids, 2, &chan_bc_scids, 1_000_000, &mut p_ctr), + 0x72 => send_mpp_hop(0, 1, &chan_ab_ids, 2, &chan_bc_ids, 1_000_000, &mut p_ctr), // 0x73: MPP 0->1->2, multi channels on second hop (B-C) - 0x73 => send_mpp_hop(0, 1, &[chan_a], 2, &chan_bc_scids, 1_000_000, &mut p_ctr), + 0x73 => send_mpp_hop(0, 1, &[chan_a_id], 2, &chan_bc_ids, 1_000_000, &mut p_ctr), // 0x74: direct MPP from 0 to 1, multi parts over single channel - 0x74 => send_mpp_direct(0, 1, &[chan_a, chan_a, chan_a], 1_000_000, &mut p_ctr), + 0x74 => { + send_mpp_direct(0, 1, &[chan_a_id, chan_a_id, chan_a_id], 1_000_000, &mut p_ctr) + }, 0x80 => { let mut max_feerate = last_htlc_clear_fee_a; @@ -2770,16 +2811,16 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } // Finally, make sure that at least one end of each channel can make a substantial payment - for &scid in &chan_ab_scids { + for &chan_id in &chan_ab_ids { assert!( - send(0, 1, scid, 10_000_000, &mut p_ctr) - || send(1, 0, scid, 10_000_000, &mut p_ctr) + send(0, 1, chan_id, 10_000_000, &mut p_ctr) + || send(1, 0, chan_id, 10_000_000, &mut p_ctr) ); } - for &scid in &chan_bc_scids { + for &chan_id in &chan_bc_ids { assert!( - send(1, 2, scid, 10_000_000, &mut p_ctr) - || send(2, 1, scid, 10_000_000, &mut p_ctr) + send(1, 2, chan_id, 10_000_000, &mut p_ctr) + || send(2, 1, chan_id, 10_000_000, &mut p_ctr) ); } From 71eb2d4f6d70de57fb4086b65e490b2d64296b41 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 01:18:19 -0800 Subject: [PATCH 4/6] Consider probe events for stuck payments check in chanmon_consistency Even though we don't explicitly send probes, because probes are detected based on hashing the payment hash+preimage, it's rather trivial for the fuzzer to build payments that accidentally end up looking like probes. --- fuzz/src/chanmon_consistency.rs | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index cd8877512d1..241869b41ae 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1819,7 +1819,22 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { assert!(resolved_payments[$node].contains(&sent_id)); } }, - events::Event::PaymentFailed { payment_id, .. } => { + // Even though we don't explicitly send probes, because probes are + // detected based on hashing the payment hash+preimage, its rather + // trivial for the fuzzer to build payments that accidentally end up + // looking like probes. 
+ events::Event::ProbeSuccessful { payment_id, .. } => { + let idx_opt = + pending_payments[$node].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + pending_payments[$node].remove(idx); + resolved_payments[$node].push(payment_id); + } else { + assert!(resolved_payments[$node].contains(&payment_id)); + } + }, + events::Event::PaymentFailed { payment_id, .. } + | events::Event::ProbeFailed { payment_id, .. } => { let idx_opt = pending_payments[$node].iter().position(|id| *id == payment_id); if let Some(idx) = idx_opt { @@ -1834,13 +1849,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::Event::PaymentClaimed { .. } => {}, events::Event::PaymentPathSuccessful { .. } => {}, events::Event::PaymentPathFailed { .. } => {}, - events::Event::ProbeSuccessful { .. } - | events::Event::ProbeFailed { .. } => { - // Even though we don't explicitly send probes, because probes are - // detected based on hashing the payment hash+preimage, its rather - // trivial for the fuzzer to build payments that accidentally end up - // looking like probes. - }, events::Event::PaymentForwarded { .. } if $node == 1 => {}, events::Event::ChannelReady { .. } => {}, events::Event::HTLCHandlingFailed { .. } => {}, From 670377cd9d9abd6b567bb0603cedeff6d4401d7f Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 11:19:47 -0800 Subject: [PATCH 5/6] Time out incomplete MPP payments in chanmon_consistency This requires calling `timer_tick_occurred`. As a result, disabled/enabled updates may be sent based on the connection status when `timer_tick_occurred` is called. --- fuzz/src/chanmon_consistency.rs | 26 ++++++++++++++++++-------- lightning/src/ln/channelmanager.rs | 3 +++ 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 241869b41ae..1077cb16f27 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1689,14 +1689,12 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { MessageSendEvent::SendAnnouncementSignatures { .. } => { // Can be generated as a reestablish response }, - MessageSendEvent::SendChannelUpdate { ref msg, .. } => { - // When we reconnect we will resend a channel_update to make sure our - // counterparty has the latest parameters for receiving payments - // through us. We do, however, check that the message does not include - // the "disabled" bit, as we should never ever have a channel which is - // disabled when we send such an update (or it may indicate channel - // force-close which we should detect as an error). - assert_eq!(msg.contents.channel_flags & 2, 0); + MessageSendEvent::SendChannelUpdate { .. } => { + // Can be generated as a reestablish response + }, + MessageSendEvent::BroadcastChannelUpdate { .. 
} => { + // Can be generated as a result of calling `timer_tick_occurred` enough + // times while peers are disconnected }, _ => if out.may_fail.load(atomic::Ordering::Acquire) { return; @@ -2277,6 +2275,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { nodes[2].maybe_update_chan_fees(); }, + 0x8a => nodes[0].timer_tick_occurred(), + 0x8b => nodes[1].timer_tick_occurred(), + 0x8c => nodes[2].timer_tick_occurred(), + 0xa0 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); let contribution = @@ -2808,6 +2810,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { process_all_events!(); + // Since MPP payments are supported, we wait until we fully settle the state of all + // channels to see if we have any committed HTLC parts of an MPP payment that need + // to be failed back. + for node in &nodes { + node.timer_tick_occurred(); + } + process_all_events!(); + // Verify no payments are stuck - all should have resolved for (idx, pending) in pending_payments.borrow().iter().enumerate() { assert!( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e840d705b8e..a7f798f6cd2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3034,7 +3034,10 @@ const _CHECK_CLTV_EXPIRY_OFFCHAIN: () = assert!( ); /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs +#[cfg(not(any(fuzzing, test, feature = "_test_utils")))] pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3; +#[cfg(any(fuzzing, test, feature = "_test_utils"))] +pub(crate) const MPP_TIMEOUT_TICKS: u8 = 1; /// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected /// until we mark the channel disabled and gossip the update. From d31b518c9089798d62996198b3bad95e8a212a07 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 11:20:35 -0800 Subject: [PATCH 6/6] Add newline to fuzz log statements This regressed at some point, making the logs harder to parse on a failed test run. --- fuzz/src/utils/test_logger.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fuzz/src/utils/test_logger.rs b/fuzz/src/utils/test_logger.rs index f8369879447..8f38d08a035 100644 --- a/fuzz/src/utils/test_logger.rs +++ b/fuzz/src/utils/test_logger.rs @@ -59,6 +59,6 @@ impl<'a, Out: Output> Write for LockedWriteAdapter<'a, Out> { impl Logger for TestLogger { fn log(&self, record: Record) { - write!(LockedWriteAdapter(&self.out), "{:<6} {}", self.id, record).unwrap(); + writeln!(LockedWriteAdapter(&self.out), "{:<6} {}", self.id, record).unwrap(); } }
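
Note on the `serial_id` collision fixed in patch 2: the commit message states that `serial_id` generation reads the first 4 bytes of `get_secure_random_bytes`, and in the old fuzz entropy layout those leading bytes were always zero, so every node derived the same value. Below is a minimal, self-contained Rust sketch of the byte-layout change only; it is not LDK code, and `FuzzEntropy` and `leading_serial_id` are hypothetical stand-ins for the fuzzer's `KeyProvider` and the interactive-tx consumer.

use std::sync::atomic::{AtomicU32, Ordering};

struct FuzzEntropy {
    node_tag: u8,       // stands in for `self.node_secret[31]`
    counter: AtomicU32, // stands in for `self.rand_bytes_id`
    fixed: bool,        // false: old layout, true: layout after the patch
}

impl FuzzEntropy {
    fn get_secure_random_bytes(&self) -> [u8; 32] {
        let id = self.counter.fetch_add(1, Ordering::Relaxed);
        let mut res = [0u8; 32];
        res[30] = 11;
        res[31] = self.node_tag;
        // Both layouts keep the per-call counter in bytes 26..30.
        res[26..30].copy_from_slice(&id.to_le_bytes());
        if self.fixed {
            // New layout: tag byte 0 per node and embed the counter in bytes 2..6 so
            // the leading bytes are unique per node and per call.
            res[0] = self.node_tag;
            res[1] = 11;
            res[2..6].copy_from_slice(&id.to_le_bytes());
        }
        res
    }
}

// Simplified stand-in for a consumer deriving a serial id from the first 4 bytes.
fn leading_serial_id(bytes: [u8; 32]) -> u32 {
    u32::from_le_bytes(bytes[0..4].try_into().unwrap())
}

fn main() {
    let old_a = FuzzEntropy { node_tag: 1, counter: AtomicU32::new(0), fixed: false };
    let old_b = FuzzEntropy { node_tag: 2, counter: AtomicU32::new(0), fixed: false };
    // Old layout: both nodes derive the same (zero) serial id, i.e. a collision.
    assert_eq!(
        leading_serial_id(old_a.get_secure_random_bytes()),
        leading_serial_id(old_b.get_secure_random_bytes())
    );

    let new_a = FuzzEntropy { node_tag: 1, counter: AtomicU32::new(0), fixed: true };
    let new_b = FuzzEntropy { node_tag: 2, counter: AtomicU32::new(0), fixed: true };
    // New layout: serial ids differ across nodes and across successive calls.
    assert_ne!(
        leading_serial_id(new_a.get_secure_random_bytes()),
        leading_serial_id(new_b.get_secure_random_bytes())
    );
    assert_ne!(
        leading_serial_id(new_a.get_secure_random_bytes()),
        leading_serial_id(new_a.get_secure_random_bytes())
    );
}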