From eb31aeb1b8c8ac7093678abe4441be111d7cf558 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Feb 2026 15:12:45 -0500 Subject: [PATCH 01/10] Trivial: use full path in test macros Useful when using these macros in lightning-tests/upgrade_downgrade_tests --- lightning/src/ln/functional_test_utils.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index a5461154a02..d5a29785a94 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1383,7 +1383,7 @@ macro_rules! _reload_node_inner { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = _reload_node( + $new_channelmanager = $crate::ln::functional_test_utils::_reload_node( &$node, $new_config, &chanman_encoded, @@ -1401,7 +1401,7 @@ macro_rules! reload_node { // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -1414,7 +1414,7 @@ macro_rules! reload_node { }; // Reload the node with the new provided config ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { - _reload_node_inner!( + $crate::_reload_node_inner!( $node, $new_config, $chanman_encoded, @@ -1431,7 +1431,7 @@ macro_rules! reload_node { ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr ) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -2971,7 +2971,7 @@ pub fn check_payment_claimable( #[cfg(any(test, ldk_bench, feature = "_test_utils"))] macro_rules! expect_payment_claimable { ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => { - expect_payment_claimable!( + $crate::expect_payment_claimable!( $node, $expected_payment_hash, $expected_payment_secret, From 07b3deff29e4e3d6eeb164780170e9d9247352b5 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 9 Feb 2026 14:16:12 -0500 Subject: [PATCH 02/10] Split method to reconstruct pending HTLCs into two In the next commit, we want to dedup fields between the InboundUpdateAdd::Forwarded's HTLCPreviousHopData and the outer InboundHTLCOutput/Channel structs, since many fields are duplicated in both places at the moment. As part of doing this cleanly, we first refactor the method that retrieves these InboundUpdateAdds for reconstructing the set of pending HTLCs during ChannelManager deconstruction. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channel.rs | 57 +++++++++++++++++++++--------- lightning/src/ln/channelmanager.rs | 57 +++++++++++++----------------- 2 files changed, 64 insertions(+), 50 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3236ebdefed..88d2e32e764 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -315,7 +315,7 @@ impl InboundHTLCState { /// /// Useful for reconstructing the pending HTLC set on startup. #[derive(Debug, Clone)] -pub(super) enum InboundUpdateAdd { +enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. 
WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing @@ -7885,10 +7885,35 @@ where Ok(()) } - /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. - pub(super) fn inbound_committed_unresolved_htlcs( + /// Returns true if any committed inbound HTLCs were received pre-LDK 0.3 and cannot be used + /// during `ChannelManager` deserialization to reconstruct the set of pending HTLCs. + pub(super) fn has_legacy_inbound_htlcs(&self) -> bool { + self.context.pending_inbound_htlcs.iter().any(|htlc| { + matches!( + &htlc.state, + InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy } + ) + }) + } + + /// Returns committed inbound HTLCs whose onion has not yet been decoded and processed. Useful + /// for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_htlcs_pending_decode( + &self, + ) -> impl Iterator + '_ { + self.context.pending_inbound_htlcs.iter().filter_map(|htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + } => Some(update_add_htlc.clone()), + _ => None, + }) + } + + /// Returns committed inbound HTLCs that have been forwarded but not yet fully resolved. Useful + /// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_forwarded_htlcs( &self, - ) -> Vec<(PaymentHash, InboundUpdateAdd)> { + ) -> impl Iterator + '_ { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell. let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7902,19 +7927,17 @@ where }) }; - self.context - .pending_inbound_htlcs - .iter() - .filter_map(|htlc| match &htlc.state { - InboundHTLCState::Committed { update_add_htlc } => { - if htlc_resolution_in_holding_cell(htlc.htlc_id) { - return None; - } - Some((htlc.payment_hash, update_add_htlc.clone())) - }, - _ => None, - }) - .collect() + self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }, + } => { + if htlc_resolution_in_holding_cell(htlc.htlc_id) { + return None; + } + Some((htlc.payment_hash, hop_data.clone(), *outbound_amt_msat)) + }, + _ => None, + }) } /// Useful when reconstructing the set of pending HTLC forwards when deserializing the diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e840d705b8e..bdc0155054f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel, - PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, - UpdateFulfillCommitFetch, WithChannelContext, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, + ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, + WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -10185,10 
+10185,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); - chan.inbound_committed_unresolved_htlcs() - .iter() - .filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { .. })) - .count() + chan.inbound_htlcs_pending_decode().count() } #[cfg(test)] @@ -18626,33 +18623,27 @@ impl< if reconstruct_manager_from_monitors { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { + // Legacy HTLCs are from pre-LDK 0.3 and cannot be reconstructed. + if funded_chan.has_legacy_inbound_htlcs() { + return Err(DecodeError::InvalidValue); + } + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. let scid_alias = funded_chan.context.outbound_scid_alias(); - let inbound_committed_update_adds = - funded_chan.inbound_committed_unresolved_htlcs(); - for (payment_hash, htlc) in inbound_committed_update_adds { - match htlc { - InboundUpdateAdd::WithOnion { update_add_htlc } => { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel` as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs - .entry(scid_alias) - .or_insert_with(Vec::new) - .push(update_add_htlc); - }, - InboundUpdateAdd::Forwarded { - hop_data, - outbound_amt_msat, - } => { - already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) - .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); - }, - InboundUpdateAdd::Legacy => { - return Err(DecodeError::InvalidValue) - }, - } + for update_add_htlc in funded_chan.inbound_htlcs_pending_decode() { + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + } + for (payment_hash, hop_data, outbound_amt_msat) in + funded_chan.inbound_forwarded_htlcs() + { + already_forwarded_htlcs + .entry((hop_data.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((hop_data, outbound_amt_msat)); } } } From d3e9cd018dfeab5e4c0884eee73988c3ecc1fb1e Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 9 Feb 2026 14:29:11 -0500 Subject: [PATCH 03/10] Dedup data in InboundUpdateAdd::Forwarded::hop_data Previously, the InboundUpdateAdd::Forwarded enum variant contained an HTLCPreviousHopData, which had a lot of fields that were redundant with the outer InboundHTLCOutput/Channel structs. Here we dedup those fields, which is important because the pending InboundUpdateAdds are persisted whenever the ChannelManager is persisted. 
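For review context, a simplified sketch of the new variant shape (the exact
fields and TLV type numbers are in the diff below; the BlindedFailure type
parameter is taken from the import this patch adds):

    Forwarded {
        incoming_packet_shared_secret: [u8; 32],
        phantom_shared_secret: Option<[u8; 32]>,
        trampoline_shared_secret: Option<[u8; 32]>,
        blinded_failure: Option<BlindedFailure>,
        outbound_amt_msat: u64,
    },

Only data that cannot be rederived from the surrounding
Channel/InboundHTLCOutput is kept. The remaining HTLCPreviousHopData fields
(htlc_id, cltv_expiry, channel_id, funding outpoint, counterparty node id,
outbound SCID alias, user_channel_id) are rebuilt from the channel context in
inbound_forwarded_htlcs() during ChannelManager deserialization.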
--- lightning/src/ln/channel.rs | 69 ++++++++++++++++++++++++------ lightning/src/ln/channelmanager.rs | 2 +- 2 files changed, 56 insertions(+), 15 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 88d2e32e764..b12061bf118 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,10 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, - HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, + HTLCPreviousHopData, HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, + PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, + MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -320,12 +320,16 @@ enum InboundUpdateAdd { WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing /// its onion to be pruned and no longer persisted. + /// + /// Contains data that is useful if we need to fail or claim this HTLC backwards after a restart + /// and it's missing in the outbound edge. Forwarded { - /// Useful if we need to fail or claim this HTLC backwards after restart, if it's missing in the - /// outbound edge. - hop_data: HTLCPreviousHopData, - /// Useful if we need to claim this HTLC backwards after a restart and it's missing in the - /// outbound edge, to generate an accurate [`Event::PaymentForwarded`]. + incoming_packet_shared_secret: [u8; 32], + phantom_shared_secret: Option<[u8; 32]>, + trampoline_shared_secret: Option<[u8; 32]>, + blinded_failure: Option, + /// Useful for generating an accurate [`Event::PaymentForwarded`], if we need to claim this + /// HTLC post-restart. 
/// /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded outbound_amt_msat: u64, @@ -341,8 +345,11 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, }, (2, Legacy) => {}, (4, Forwarded) => { - (0, hop_data, required), + (0, incoming_packet_shared_secret, required), (2, outbound_amt_msat, required), + (4, phantom_shared_secret, option), + (6, trampoline_shared_secret, option), + (8, blinded_failure, option), }, ); @@ -7927,14 +7934,42 @@ where }) }; + let prev_outbound_scid_alias = self.context.outbound_scid_alias(); + let user_channel_id = self.context.get_user_id(); + let channel_id = self.context.channel_id(); + let outpoint = self.funding_outpoint(); + let counterparty_node_id = self.context.get_counterparty_node_id(); + self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state { InboundHTLCState::Committed { - update_add_htlc: InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }, + update_add_htlc: + InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + outbound_amt_msat, + }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { return None; } - Some((htlc.payment_hash, hop_data.clone(), *outbound_amt_msat)) + // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards + // post-restart, if it is missing in the outbound edge. + let hop_data = HTLCPreviousHopData { + prev_outbound_scid_alias, + user_channel_id: Some(user_channel_id), + htlc_id: htlc.htlc_id, + incoming_packet_shared_secret: *incoming_packet_shared_secret, + phantom_shared_secret: *phantom_shared_secret, + trampoline_shared_secret: *trampoline_shared_secret, + blinded_failure: *blinded_failure, + channel_id, + outpoint, + counterparty_node_id: Some(counterparty_node_id), + cltv_expiry: Some(htlc.cltv_expiry), + }; + Some((htlc.payment_hash, hop_data, *outbound_amt_msat)) }, _ => None, }) @@ -7984,12 +8019,18 @@ where /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to /// persist its onion. pub(super) fn prune_inbound_htlc_onion( - &mut self, htlc_id: u64, hop_data: HTLCPreviousHopData, outbound_amt_msat: u64, + &mut self, htlc_id: u64, hop_data: &HTLCPreviousHopData, outbound_amt_msat: u64, ) { for htlc in self.context.pending_inbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { - *update_add_htlc = InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }; + *update_add_htlc = InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret: hop_data.incoming_packet_shared_secret, + phantom_shared_secret: hop_data.phantom_shared_secret, + trampoline_shared_secret: hop_data.trampoline_shared_secret, + blinded_failure: hop_data.blinded_failure, + outbound_amt_msat, + }; return; } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bdc0155054f..68eeb7c4e15 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10161,7 +10161,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(chan) = peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) { - chan.prune_inbound_htlc_onion(source.htlc_id, source, outbound_amt_msat); + chan.prune_inbound_htlc_onion(source.htlc_id, &source, outbound_amt_msat); } } } From 3246010beba323077c90f7e703258f4d5bfc0b44 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:15:51 -0500 Subject: [PATCH 04/10] Trivial: ChannelManager::read var rename prefactor Makes an upcoming commit cleaner: when we add a next_hop variable we want to distinguish it from the previous hop. --- lightning/src/ln/channelmanager.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 68eeb7c4e15..cc95424dbbd 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18637,13 +18637,13 @@ impl< .or_insert_with(Vec::new) .push(update_add_htlc); } - for (payment_hash, hop_data, outbound_amt_msat) in + for (payment_hash, prev_hop, outbound_amt_msat) in funded_chan.inbound_forwarded_htlcs() { already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) + .entry((prev_hop.channel_id, payment_hash)) .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); + .push((prev_hop, outbound_amt_msat)); } } } @@ -19352,14 +19352,14 @@ impl< if let Some(forwarded_htlcs) = already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) { - for (hop_data, outbound_amt_msat) in forwarded_htlcs { + for (prev_hop, outbound_amt_msat) in forwarded_htlcs { let new_pending_claim = !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { - matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.channel_id == hop_data.channel_id) + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { let counterparty_node_id = monitor.get_counterparty_node_id(); - let is_channel_closed = channel_manager + let is_downstream_closed = channel_manager .per_peer_state .read() .unwrap() @@ -19372,10 +19372,10 @@ impl< .contains_key(channel_id) }); pending_claims_to_replay.push(( - HTLCSource::PreviousHopData(hop_data), + HTLCSource::PreviousHopData(prev_hop), payment_preimage, outbound_amt_msat, - is_channel_closed, + is_downstream_closed, counterparty_node_id, monitor.get_funding_txo(), *channel_id, From 70ae54fb21b1440c6532aa95fb1d60ede9df96da Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:20:31 -0500 Subject: [PATCH 05/10] Trivial: user_channel_id in pending_claims_to_replay Adds support for passing user_channel_id into the pending_claims_to_replay vec, which is used by the ChannelManager on startup. For now user_channel_id is always set to None, but in upcoming commits we will set it to Some when the downstream channel is still open (this is currently a bug). Separated out here for reviewability. 
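Roughly, each pending_claims_to_replay entry grows from

    (source, preimage, forwarded_amt_msat, downstream_closed,
     downstream_node_id, downstream_funding, downstream_channel_id)

to the same tuple with a trailing downstream_user_channel_id: Option<u128>
(the element names above are descriptive rather than the actual bindings).
The replay loop then passes that new element to the claim handling where it
previously passed a literal None.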
--- lightning/src/ln/channelmanager.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index cc95424dbbd..ac2af352e34 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18988,7 +18988,7 @@ impl< Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id())) + monitor.get_funding_txo(), monitor.channel_id(), None)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -19354,7 +19354,7 @@ impl< { for (prev_hop, outbound_amt_msat) in forwarded_htlcs { let new_pending_claim = - !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { @@ -19379,6 +19379,7 @@ impl< counterparty_node_id, monitor.get_funding_txo(), *channel_id, + None, )); } } @@ -19648,6 +19649,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, + downstream_user_channel_id, ) in pending_claims_to_replay { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we @@ -19663,7 +19665,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, - None, + downstream_user_channel_id, None, None, ); From b3b59e6dfa51b7e2f0fe62575c48693ffeb12332 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 15:18:58 -0500 Subject: [PATCH 06/10] Persist outbound channel info in inbound HTLCs We need these fields to generate a correct PaymentForwarded event if we need to claim this inbound HTLC backwards after restart and it's already been claimed and removed on the outbound edge. --- lightning/src/ln/channel.rs | 53 +++++++++++++++++++++--------- lightning/src/ln/channelmanager.rs | 42 ++++++++++++++++++----- 2 files changed, 72 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b12061bf118..37a0661de76 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -308,6 +308,32 @@ impl InboundHTLCState { } } +/// Information about the outbound hop for a forwarded HTLC. Useful for generating an accurate +/// [`Event::PaymentForwarded`] if we need to claim this HTLC post-restart. +/// +/// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded +#[derive(Debug, Copy, Clone)] +pub(super) struct OutboundHop { + /// The amount forwarded outbound. + pub(super) amt_msat: u64, + /// The outbound channel this HTLC was forwarded over. + pub(super) channel_id: ChannelId, + /// The next-hop recipient of this HTLC. + pub(super) node_id: PublicKey, + /// The outbound channel's funding outpoint. + pub(super) funding_txo: OutPoint, + /// The outbound channel's user channel ID. + pub(super) user_channel_id: u128, +} + +impl_writeable_tlv_based!(OutboundHop, { + (0, amt_msat, required), + (2, channel_id, required), + (4, node_id, required), + (6, funding_txo, required), + (8, user_channel_id, required), +}); + /// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. 
If /// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to /// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every @@ -328,11 +354,7 @@ enum InboundUpdateAdd { phantom_shared_secret: Option<[u8; 32]>, trampoline_shared_secret: Option<[u8; 32]>, blinded_failure: Option, - /// Useful for generating an accurate [`Event::PaymentForwarded`], if we need to claim this - /// HTLC post-restart. - /// - /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded - outbound_amt_msat: u64, + outbound_hop: OutboundHop, }, /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound /// committed HTLCs. @@ -346,7 +368,7 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, (2, Legacy) => {}, (4, Forwarded) => { (0, incoming_packet_shared_secret, required), - (2, outbound_amt_msat, required), + (2, outbound_hop, required), (4, phantom_shared_secret, option), (6, trampoline_shared_secret, option), (8, blinded_failure, option), @@ -7948,7 +7970,7 @@ where phantom_shared_secret, trampoline_shared_secret, blinded_failure, - outbound_amt_msat, + outbound_hop: OutboundHop { amt_msat, .. }, }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { @@ -7956,7 +7978,7 @@ where } // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards // post-restart, if it is missing in the outbound edge. - let hop_data = HTLCPreviousHopData { + let prev_hop_data = HTLCPreviousHopData { prev_outbound_scid_alias, user_channel_id: Some(user_channel_id), htlc_id: htlc.htlc_id, @@ -7969,7 +7991,7 @@ where counterparty_node_id: Some(counterparty_node_id), cltv_expiry: Some(htlc.cltv_expiry), }; - Some((htlc.payment_hash, hop_data, *outbound_amt_msat)) + Some((htlc.payment_hash, prev_hop_data, *amt_msat)) }, _ => None, }) @@ -8019,17 +8041,18 @@ where /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to /// persist its onion. 
pub(super) fn prune_inbound_htlc_onion( - &mut self, htlc_id: u64, hop_data: &HTLCPreviousHopData, outbound_amt_msat: u64, + &mut self, htlc_id: u64, prev_hop_data: &HTLCPreviousHopData, + outbound_hop_data: OutboundHop, ) { for htlc in self.context.pending_inbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { *update_add_htlc = InboundUpdateAdd::Forwarded { - incoming_packet_shared_secret: hop_data.incoming_packet_shared_secret, - phantom_shared_secret: hop_data.phantom_shared_secret, - trampoline_shared_secret: hop_data.trampoline_shared_secret, - blinded_failure: hop_data.blinded_failure, - outbound_amt_msat, + incoming_packet_shared_secret: prev_hop_data.incoming_packet_shared_secret, + phantom_shared_secret: prev_hop_data.phantom_shared_secret, + trampoline_shared_secret: prev_hop_data.trampoline_shared_secret, + blinded_failure: prev_hop_data.blinded_failure, + outbound_hop: outbound_hop_data, }; return; } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ac2af352e34..b7b39698bb4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, - ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, - WithChannelContext, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundHop, OutboundV1Channel, + PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, + UpdateFulfillCommitFetch, WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -1402,6 +1402,8 @@ enum PostMonitorUpdateChanResume { Unblocked { channel_id: ChannelId, counterparty_node_id: PublicKey, + funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, htlc_forwards: Option, @@ -9582,8 +9584,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// Handles actions which need to complete after a [`ChannelMonitorUpdate`] has been applied /// which can happen after the per-peer state lock has been dropped. fn post_monitor_update_unlock( - &self, channel_id: ChannelId, counterparty_node_id: PublicKey, - unbroadcasted_batch_funding_txid: Option, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, @@ -9660,7 +9662,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } - self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); + self.prune_persisted_inbound_htlc_onions( + channel_id, + counterparty_node_id, + funding_txo, + user_channel_id, + committed_outbound_htlc_sources, + ); } fn handle_monitor_update_completion_actions< @@ -10129,6 +10137,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id: chan_id, counterparty_node_id, + funding_txo: chan.funding_outpoint(), + user_channel_id: chan.context.get_user_id(), unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -10144,7 +10154,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound /// edge, we no longer need to persist the inbound edge's onion and can prune it here. fn prune_persisted_inbound_htlc_onions( - &self, committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + &self, outbound_channel_id: ChannelId, outbound_node_id: PublicKey, + outbound_funding_txo: OutPoint, outbound_user_channel_id: u128, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { let per_peer_state = self.per_peer_state.read().unwrap(); for (source, outbound_amt_msat) in committed_outbound_htlc_sources { @@ -10161,7 +10173,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) { - chan.prune_inbound_htlc_onion(source.htlc_id, &source, outbound_amt_msat); + chan.prune_inbound_htlc_onion( + source.htlc_id, + &source, + OutboundHop { + amt_msat: outbound_amt_msat, + channel_id: outbound_channel_id, + node_id: outbound_node_id, + funding_txo: outbound_funding_txo, + user_channel_id: outbound_user_channel_id, + }, + ); } } } @@ -10217,6 +10239,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -10228,6 +10252,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.post_monitor_update_unlock( channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, From 48010cbadf6723b679c90e840de0de36d8415265 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 16:33:14 -0500 Subject: [PATCH 07/10] Fix PaymentForwarded fields on restart claim Previously, we were spuriously using the upstream channel's info when we should've been using the downstream channel's. --- lightning/src/ln/channel.rs | 6 +++--- lightning/src/ln/channelmanager.rs | 25 ++++++++++++------------- lightning/src/ln/reload_tests.rs | 8 +------- 3 files changed, 16 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 37a0661de76..4a0d1175b8e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7942,7 +7942,7 @@ where /// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. pub(super) fn inbound_forwarded_htlcs( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell. let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7970,7 +7970,7 @@ where phantom_shared_secret, trampoline_shared_secret, blinded_failure, - outbound_hop: OutboundHop { amt_msat, .. 
}, + outbound_hop, }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { @@ -7991,7 +7991,7 @@ where counterparty_node_id: Some(counterparty_node_id), cltv_expiry: Some(htlc.cltv_expiry), }; - Some((htlc.payment_hash, prev_hop_data, *amt_msat)) + Some((htlc.payment_hash, prev_hop_data, *outbound_hop)) }, _ => None, }) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b7b39698bb4..897f10cf2f4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18610,11 +18610,11 @@ impl< // that it is handled. let mut already_forwarded_htlcs: HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, > = new_hash_map(); let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, >, prev_hop: &HTLCPreviousHopData, payment_hash: &PaymentHash| { @@ -18663,13 +18663,13 @@ impl< .or_insert_with(Vec::new) .push(update_add_htlc); } - for (payment_hash, prev_hop, outbound_amt_msat) in + for (payment_hash, prev_hop, next_hop) in funded_chan.inbound_forwarded_htlcs() { already_forwarded_htlcs .entry((prev_hop.channel_id, payment_hash)) .or_insert_with(Vec::new) - .push((prev_hop, outbound_amt_msat)); + .push((prev_hop, next_hop)); } } } @@ -19378,34 +19378,33 @@ impl< if let Some(forwarded_htlcs) = already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) { - for (prev_hop, outbound_amt_msat) in forwarded_htlcs { + for (prev_hop, next_hop) in forwarded_htlcs { let new_pending_claim = !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { - let counterparty_node_id = monitor.get_counterparty_node_id(); let is_downstream_closed = channel_manager .per_peer_state .read() .unwrap() - .get(&counterparty_node_id) + .get(&next_hop.node_id) .map_or(true, |peer_state_mtx| { !peer_state_mtx .lock() .unwrap() .channel_by_id - .contains_key(channel_id) + .contains_key(&next_hop.channel_id) }); pending_claims_to_replay.push(( HTLCSource::PreviousHopData(prev_hop), payment_preimage, - outbound_amt_msat, + next_hop.amt_msat, is_downstream_closed, - counterparty_node_id, - monitor.get_funding_txo(), - *channel_id, - None, + next_hop.node_id, + next_hop.funding_txo, + next_hop.channel_id, + Some(next_hop.user_channel_id), )); } } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c7e7175602d..42986bc41b1 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1958,14 +1958,8 @@ fn test_reload_node_with_preimage_in_monitor_claims_htlc() { ); // When the claim is reconstructed during reload, a PaymentForwarded event is generated. - // This event has next_user_channel_id as None since the outbound HTLC was already removed. // Fetching events triggers the pending monitor update (adding preimage) to be applied. - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match &events[0] { - Event::PaymentForwarded { total_fee_earned_msat: Some(1000), .. } => {}, - _ => panic!("Expected PaymentForwarded event"), - } + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors(&nodes[1], 1); // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. 
From 0d6dcc910558c558e763c02c17d609dc0410d835 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 16:51:46 -0500 Subject: [PATCH 08/10] Fix missing user_channel_id in PaymentForwarded Previously, if a forwarding node reloaded mid-HTLC-forward with a preimage in the outbound edge monitor and the outbound edge channel still open, and subsequently reclaimed the inbound HTLC backwards, the PaymentForwarded event would be missing the next_user_channel_id field. --- lightning/src/ln/chanmon_update_fail_tests.rs | 7 ++++++- lightning/src/ln/channelmanager.rs | 12 +++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 5a0c37bd61d..e5f6b7259ff 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3938,7 +3938,12 @@ fn do_test_durable_preimages_on_closed_channel( let evs = nodes[1].node.get_and_clear_pending_events(); assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 }); for ev in evs { - if let Event::PaymentForwarded { .. } = ev { + if let Event::PaymentForwarded { claim_from_onchain_tx, next_user_channel_id, .. } = ev { + if !claim_from_onchain_tx { + // If the outbound channel is still open, the `next_user_channel_id` should be available. + // This was previously broken. + assert!(next_user_channel_id.is_some()) + } } else { panic!(); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 897f10cf2f4..40342d72700 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18707,14 +18707,16 @@ impl< } } for (channel_id, monitor) in args.channel_monitors.iter() { - let mut is_channel_closed = true; + let (mut is_channel_closed, mut user_channel_id_opt) = (true, None); let counterparty_node_id = monitor.get_counterparty_node_id(); if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; - is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors && !is_channel_closed { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + is_channel_closed = false; + user_channel_id_opt = Some(chan.context().get_user_id()); + + if reconstruct_manager_from_monitors { if let Some(funded_chan) = chan.as_funded() { for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() { @@ -19014,7 +19016,7 @@ impl< Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id(), None)) + monitor.get_funding_txo(), monitor.channel_id(), user_channel_id_opt)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage From 5daf51c00764572f1cecd38c6f9dd16f69f172db Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 11 Feb 2026 16:14:19 -0500 Subject: [PATCH 09/10] Test restart-claim of two MPP holding cell HTLCs Test that if we restart and had two inbound MPP-part HTLCs received over the same channel in the holding cell prior to shutdown, and we lost the holding cell prior to restart, those HTLCs will still be claimed backwards. 
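In condensed form, the post-reload expectations the new test checks (the
full setup is in the test body below):

    // PaymentForwarded events are regenerated from the reconstructed claims
    let events = nodes[1].node.get_and_clear_pending_events();
    assert_eq!(events.len(), 2);
    // One monitor update is applied per replayed HTLC preimage
    check_added_monitors(&nodes[1], 2);
    // Reconnecting to nodes[0] delivers both claims from the holding cell
    reconnect_args.pending_cell_htlc_claims = (0, 2);
    expect_payment_sent(&nodes[0], payment_preimage, None, true, true);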
Test largely written by Claude --- lightning/src/ln/chanmon_update_fail_tests.rs | 3 +- lightning/src/ln/functional_test_utils.rs | 22 ++- lightning/src/ln/reload_tests.rs | 145 ++++++++++++++++++ 3 files changed, 161 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index e5f6b7259ff..b421114e911 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3519,8 +3519,9 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode .node .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed); check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + let (a, raa, holding_cell) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); + assert!(holding_cell.is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index d5a29785a94..d3902b26201 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2672,20 +2672,23 @@ pub fn commitment_signed_dance_through_cp_raa( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, includes_claim: bool, ) -> Option { - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(node_a, node_b, fail_backwards); + assert!(node_b_holding_cell_htlcs.is_empty()); node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors(node_a, if includes_claim { 0 } else { 1 }); extra_msg_option } /// Does the main logic in the commitment_signed dance. After the first `commitment_signed` has -/// been delivered, this method picks up and delivers the response `revoke_and_ack` and -/// `commitment_signed`, returning the recipient's `revoke_and_ack` and any extra message it may -/// have included. +/// been delivered, delivers the response `revoke_and_ack` and `commitment_signed`, and returns: +/// - The recipient's `revoke_and_ack` +/// - The recipient's extra message (if any) after handling the commitment_signed +/// - Any messages released from the initiator's holding cell after handling the `revoke_and_ack` +/// (e.g., a second HTLC on the same channel) pub fn do_main_commitment_signed_dance( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, -) -> (Option, msgs::RevokeAndACK) { +) -> (Option, msgs::RevokeAndACK, Vec) { let node_a_id = node_a.node.get_our_node_id(); let node_b_id = node_b.node.get_our_node_id(); @@ -2693,7 +2696,9 @@ pub fn do_main_commitment_signed_dance( check_added_monitors(&node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + // Handling the RAA may release HTLCs from node_b's holding cell (e.g., if multiple HTLCs + // were sent over the same channel and the second was queued behind the first). 
+ let node_b_holding_cell_htlcs = node_b.node.get_and_clear_pending_msg_events(); check_added_monitors(&node_b, 1); node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { @@ -2716,7 +2721,7 @@ pub fn do_main_commitment_signed_dance( assert!(node_a.node.get_and_clear_pending_events().is_empty()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); } - (extra_msg_option, bs_revoke_and_ack) + (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) } /// Runs the commitment_signed dance by delivering the commitment_signed and handling the @@ -2733,9 +2738,10 @@ pub fn commitment_signed_dance_return_raa( .node .handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed); check_added_monitors(&node_a, 1); - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(&node_a, &node_b, fail_backwards); assert!(extra_msg_option.is_none()); + assert!(node_b_holding_cell_htlcs.is_empty()); bs_revoke_and_ack } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 42986bc41b1..d1e34cb7c71 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -2082,3 +2082,148 @@ fn test_reload_node_without_preimage_fails_htlc() { // nodes[0] should now have received the failure and generate PaymentFailed. expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); } + +#[test] +fn test_reload_with_mpp_claims_on_same_channel() { + // Test that if a forwarding node has two HTLCs for the same MPP payment that were both + // irrevocably removed on the outbound edge via claim but are still forwarded-and-unresolved + // on the inbound edge, both HTLCs will be claimed backwards on restart. + // + // Topology: + // nodes[0] ----chan_0_1----> nodes[1] ----chan_1_2_a----> nodes[2] + // \----chan_1_2_b---/ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 2_000_000, 0); + let chan_1_2_a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + let chan_1_2_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2_a = chan_1_2_a.2; + let chan_id_1_2_b = chan_1_2_b.2; + + // Send an MPP payment large enough that the router must split it across both outbound channels. + // Each 1M sat outbound channel has 100M msat max in-flight, so 150M msat requires splitting. 
+ let amt_msat = 150_000_000; + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + + let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); + nodes[0].node.send_payment_with_route( + route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, + ).unwrap(); + check_added_monitors(&nodes[0], 1); + + // Forward the first HTLC nodes[0] -> nodes[1] -> nodes[2]. Note that the second HTLC is released + // from the holding cell during the first HTLC's commitment_signed_dance. + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event_1 = SendEvent::from_event(events.remove(0)); + + nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event_1.msgs[0]); + check_added_monitors(&nodes[1], 0); + nodes[1].node.handle_commitment_signed_batch_test(node_0_id, &payment_event_1.commitment_msg); + check_added_monitors(&nodes[1], 1); + let (_, raa, holding_cell_htlcs) = + do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + assert_eq!(holding_cell_htlcs.len(), 1); + let payment_event_2 = holding_cell_htlcs.into_iter().next().unwrap(); + nodes[1].node.handle_revoke_and_ack(node_0_id, &raa); + check_added_monitors(&nodes[1], 1); + + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev_1_2 = events.remove(0); + pass_along_path( + &nodes[1], &[&nodes[2]], amt_msat, payment_hash, Some(payment_secret), ev_1_2, false, None, + ); + + // Second HTLC: full path nodes[0] -> nodes[1] -> nodes[2]. PaymentClaimable expected at end. + pass_along_path( + &nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), + payment_event_2, true, None, + ); + + // Claim the HTLCs such that they're fully removed from the outbound edge, but disconnect + // node_0<>node_1 so that they can't be claimed backwards by node_1. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 2); + expect_payment_claimed!(nodes[2], payment_hash, amt_msat); + + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + let mut events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + for ev in events { + match ev { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. } => { + assert_eq!(*node_id, node_1_id); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); + }, + _ => panic!("Unexpected event"), + } + } + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + + // Clear the holding cell's claim entries on chan_0_1 before serialization. + // This simulates a crash where both HTLCs were fully removed on the outbound edges but are + // still present on the inbound edge without a resolution. 
+ nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_a_serialized = get_monitor!(nodes[1], chan_id_1_2_a).encode(); + let mon_1_2_b_serialized = get_monitor!(nodes[1], chan_id_1_2_b).encode(); + + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_a_serialized, &mon_1_2_b_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // When the claims are reconstructed during reload, PaymentForwarded events are regenerated. + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + // Fetching events triggers the pending monitor updates (one for each HTLC preimage) to be applied. + check_added_monitors(&nodes[1], 2); + + // Reconnect nodes[1] to nodes[0]. Both claims should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 2); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received both fulfills and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} From ab0ba65923158791a9afa196e275c4a9d375a673 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:30:16 -0500 Subject: [PATCH 10/10] Update RECONSTRUCT_HTLCS_FROM_CHANS_VERSION 5 -> 2 We previously had 5 due to wanting some flexibility to bump versions in between, but eventually concluded that wasn't necessary. --- lightning/src/ln/channelmanager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 40342d72700..5a4f569d879 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16616,7 +16616,7 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; // // If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and // acts accordingly. -const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 5; +const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 2; impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec),