From eb31aeb1b8c8ac7093678abe4441be111d7cf558 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Feb 2026 15:12:45 -0500 Subject: [PATCH 01/21] Trivial: use full path in test macros Useful when using these macros in lightning-tests/upgrade_downgrade_tests --- lightning/src/ln/functional_test_utils.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index a5461154a02..d5a29785a94 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1383,7 +1383,7 @@ macro_rules! _reload_node_inner { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = _reload_node( + $new_channelmanager = $crate::ln::functional_test_utils::_reload_node( &$node, $new_config, &chanman_encoded, @@ -1401,7 +1401,7 @@ macro_rules! reload_node { // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -1414,7 +1414,7 @@ macro_rules! reload_node { }; // Reload the node with the new provided config ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { - _reload_node_inner!( + $crate::_reload_node_inner!( $node, $new_config, $chanman_encoded, @@ -1431,7 +1431,7 @@ macro_rules! reload_node { ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr ) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -2971,7 +2971,7 @@ pub fn check_payment_claimable( #[cfg(any(test, ldk_bench, feature = "_test_utils"))] macro_rules! expect_payment_claimable { ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => { - expect_payment_claimable!( + $crate::expect_payment_claimable!( $node, $expected_payment_hash, $expected_payment_secret, From 07b3deff29e4e3d6eeb164780170e9d9247352b5 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 9 Feb 2026 14:16:12 -0500 Subject: [PATCH 02/21] Split method to reconstruct pending HTLCs into two In the next commit, we want to dedup fields between the InboundUpdateAdd::Forwarded's HTLCPreviousHopData and the outer InboundHTLCOutput/Channel structs, since many fields are duplicated in both places at the moment. As part of doing this cleanly, we first refactor the method that retrieves these InboundUpdateAdds for reconstructing the set of pending HTLCs during ChannelManager deconstruction. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channel.rs | 57 +++++++++++++++++++++--------- lightning/src/ln/channelmanager.rs | 57 +++++++++++++----------------- 2 files changed, 64 insertions(+), 50 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3236ebdefed..88d2e32e764 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -315,7 +315,7 @@ impl InboundHTLCState { /// /// Useful for reconstructing the pending HTLC set on startup. #[derive(Debug, Clone)] -pub(super) enum InboundUpdateAdd { +enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. 
WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing @@ -7885,10 +7885,35 @@ where Ok(()) } - /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. - pub(super) fn inbound_committed_unresolved_htlcs( + /// Returns true if any committed inbound HTLCs were received pre-LDK 0.3 and cannot be used + /// during `ChannelManager` deserialization to reconstruct the set of pending HTLCs. + pub(super) fn has_legacy_inbound_htlcs(&self) -> bool { + self.context.pending_inbound_htlcs.iter().any(|htlc| { + matches!( + &htlc.state, + InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy } + ) + }) + } + + /// Returns committed inbound HTLCs whose onion has not yet been decoded and processed. Useful + /// for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_htlcs_pending_decode( + &self, + ) -> impl Iterator + '_ { + self.context.pending_inbound_htlcs.iter().filter_map(|htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + } => Some(update_add_htlc.clone()), + _ => None, + }) + } + + /// Returns committed inbound HTLCs that have been forwarded but not yet fully resolved. Useful + /// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_forwarded_htlcs( &self, - ) -> Vec<(PaymentHash, InboundUpdateAdd)> { + ) -> impl Iterator + '_ { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell. let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7902,19 +7927,17 @@ where }) }; - self.context - .pending_inbound_htlcs - .iter() - .filter_map(|htlc| match &htlc.state { - InboundHTLCState::Committed { update_add_htlc } => { - if htlc_resolution_in_holding_cell(htlc.htlc_id) { - return None; - } - Some((htlc.payment_hash, update_add_htlc.clone())) - }, - _ => None, - }) - .collect() + self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }, + } => { + if htlc_resolution_in_holding_cell(htlc.htlc_id) { + return None; + } + Some((htlc.payment_hash, hop_data.clone(), *outbound_amt_msat)) + }, + _ => None, + }) } /// Useful when reconstructing the set of pending HTLC forwards when deserializing the diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e840d705b8e..bdc0155054f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel, - PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, - UpdateFulfillCommitFetch, WithChannelContext, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, + ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, + WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -10185,10 
+10185,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); - chan.inbound_committed_unresolved_htlcs() - .iter() - .filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { .. })) - .count() + chan.inbound_htlcs_pending_decode().count() } #[cfg(test)] @@ -18626,33 +18623,27 @@ impl< if reconstruct_manager_from_monitors { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { + // Legacy HTLCs are from pre-LDK 0.3 and cannot be reconstructed. + if funded_chan.has_legacy_inbound_htlcs() { + return Err(DecodeError::InvalidValue); + } + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. let scid_alias = funded_chan.context.outbound_scid_alias(); - let inbound_committed_update_adds = - funded_chan.inbound_committed_unresolved_htlcs(); - for (payment_hash, htlc) in inbound_committed_update_adds { - match htlc { - InboundUpdateAdd::WithOnion { update_add_htlc } => { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel` as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs - .entry(scid_alias) - .or_insert_with(Vec::new) - .push(update_add_htlc); - }, - InboundUpdateAdd::Forwarded { - hop_data, - outbound_amt_msat, - } => { - already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) - .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); - }, - InboundUpdateAdd::Legacy => { - return Err(DecodeError::InvalidValue) - }, - } + for update_add_htlc in funded_chan.inbound_htlcs_pending_decode() { + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + } + for (payment_hash, hop_data, outbound_amt_msat) in + funded_chan.inbound_forwarded_htlcs() + { + already_forwarded_htlcs + .entry((hop_data.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((hop_data, outbound_amt_msat)); } } } From d3e9cd018dfeab5e4c0884eee73988c3ecc1fb1e Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 9 Feb 2026 14:29:11 -0500 Subject: [PATCH 03/21] Dedup data in InboundUpdateAdd::Forwarded::hop_data Previously, the InboundUpdateAdd::Forwarded enum variant contained an HTLCPreviousHopData, which had a lot of fields that were redundant with the outer InboundHTLCOutput/Channel structs. Here we dedup those fields, which is important because the pending InboundUpdateAdds are persisted whenever the ChannelManager is persisted. 
--- lightning/src/ln/channel.rs | 69 ++++++++++++++++++++++++------ lightning/src/ln/channelmanager.rs | 2 +- 2 files changed, 56 insertions(+), 15 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 88d2e32e764..b12061bf118 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,10 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, - HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, + HTLCPreviousHopData, HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, + PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, + MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -320,12 +320,16 @@ enum InboundUpdateAdd { WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing /// its onion to be pruned and no longer persisted. + /// + /// Contains data that is useful if we need to fail or claim this HTLC backwards after a restart + /// and it's missing in the outbound edge. Forwarded { - /// Useful if we need to fail or claim this HTLC backwards after restart, if it's missing in the - /// outbound edge. - hop_data: HTLCPreviousHopData, - /// Useful if we need to claim this HTLC backwards after a restart and it's missing in the - /// outbound edge, to generate an accurate [`Event::PaymentForwarded`]. + incoming_packet_shared_secret: [u8; 32], + phantom_shared_secret: Option<[u8; 32]>, + trampoline_shared_secret: Option<[u8; 32]>, + blinded_failure: Option, + /// Useful for generating an accurate [`Event::PaymentForwarded`], if we need to claim this + /// HTLC post-restart. 
/// /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded outbound_amt_msat: u64, @@ -341,8 +345,11 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, }, (2, Legacy) => {}, (4, Forwarded) => { - (0, hop_data, required), + (0, incoming_packet_shared_secret, required), (2, outbound_amt_msat, required), + (4, phantom_shared_secret, option), + (6, trampoline_shared_secret, option), + (8, blinded_failure, option), }, ); @@ -7927,14 +7934,42 @@ where }) }; + let prev_outbound_scid_alias = self.context.outbound_scid_alias(); + let user_channel_id = self.context.get_user_id(); + let channel_id = self.context.channel_id(); + let outpoint = self.funding_outpoint(); + let counterparty_node_id = self.context.get_counterparty_node_id(); + self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state { InboundHTLCState::Committed { - update_add_htlc: InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }, + update_add_htlc: + InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + outbound_amt_msat, + }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { return None; } - Some((htlc.payment_hash, hop_data.clone(), *outbound_amt_msat)) + // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards + // post-restart, if it is missing in the outbound edge. + let hop_data = HTLCPreviousHopData { + prev_outbound_scid_alias, + user_channel_id: Some(user_channel_id), + htlc_id: htlc.htlc_id, + incoming_packet_shared_secret: *incoming_packet_shared_secret, + phantom_shared_secret: *phantom_shared_secret, + trampoline_shared_secret: *trampoline_shared_secret, + blinded_failure: *blinded_failure, + channel_id, + outpoint, + counterparty_node_id: Some(counterparty_node_id), + cltv_expiry: Some(htlc.cltv_expiry), + }; + Some((htlc.payment_hash, hop_data, *outbound_amt_msat)) }, _ => None, }) @@ -7984,12 +8019,18 @@ where /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to /// persist its onion. pub(super) fn prune_inbound_htlc_onion( - &mut self, htlc_id: u64, hop_data: HTLCPreviousHopData, outbound_amt_msat: u64, + &mut self, htlc_id: u64, hop_data: &HTLCPreviousHopData, outbound_amt_msat: u64, ) { for htlc in self.context.pending_inbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { - *update_add_htlc = InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }; + *update_add_htlc = InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret: hop_data.incoming_packet_shared_secret, + phantom_shared_secret: hop_data.phantom_shared_secret, + trampoline_shared_secret: hop_data.trampoline_shared_secret, + blinded_failure: hop_data.blinded_failure, + outbound_amt_msat, + }; return; } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bdc0155054f..68eeb7c4e15 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10161,7 +10161,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(chan) = peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) { - chan.prune_inbound_htlc_onion(source.htlc_id, source, outbound_amt_msat); + chan.prune_inbound_htlc_onion(source.htlc_id, &source, outbound_amt_msat); } } } From 3246010beba323077c90f7e703258f4d5bfc0b44 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:15:51 -0500 Subject: [PATCH 04/21] Trivial: ChannelManager::read var rename prefactor Makes an upcoming commit cleaner: when we add a next_hop variable we want to distinguish it from the previous hop. --- lightning/src/ln/channelmanager.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 68eeb7c4e15..cc95424dbbd 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18637,13 +18637,13 @@ impl< .or_insert_with(Vec::new) .push(update_add_htlc); } - for (payment_hash, hop_data, outbound_amt_msat) in + for (payment_hash, prev_hop, outbound_amt_msat) in funded_chan.inbound_forwarded_htlcs() { already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) + .entry((prev_hop.channel_id, payment_hash)) .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); + .push((prev_hop, outbound_amt_msat)); } } } @@ -19352,14 +19352,14 @@ impl< if let Some(forwarded_htlcs) = already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) { - for (hop_data, outbound_amt_msat) in forwarded_htlcs { + for (prev_hop, outbound_amt_msat) in forwarded_htlcs { let new_pending_claim = !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { - matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.channel_id == hop_data.channel_id) + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { let counterparty_node_id = monitor.get_counterparty_node_id(); - let is_channel_closed = channel_manager + let is_downstream_closed = channel_manager .per_peer_state .read() .unwrap() @@ -19372,10 +19372,10 @@ impl< .contains_key(channel_id) }); pending_claims_to_replay.push(( - HTLCSource::PreviousHopData(hop_data), + HTLCSource::PreviousHopData(prev_hop), payment_preimage, outbound_amt_msat, - is_channel_closed, + is_downstream_closed, counterparty_node_id, monitor.get_funding_txo(), *channel_id, From 70ae54fb21b1440c6532aa95fb1d60ede9df96da Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:20:31 -0500 Subject: [PATCH 05/21] Trivial: user_channel_id in pending_claims_to_replay Adds support for passing user_channel_id into the pending_claims_to_replay vec, which is used by the ChannelManager on startup. For now user_channel_id is always set to None, but in upcoming commits we will set it to Some when the downstream channel is still open (this is currently a bug). Separated out here for reviewability. 
--- lightning/src/ln/channelmanager.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index cc95424dbbd..ac2af352e34 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18988,7 +18988,7 @@ impl< Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id())) + monitor.get_funding_txo(), monitor.channel_id(), None)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -19354,7 +19354,7 @@ impl< { for (prev_hop, outbound_amt_msat) in forwarded_htlcs { let new_pending_claim = - !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { @@ -19379,6 +19379,7 @@ impl< counterparty_node_id, monitor.get_funding_txo(), *channel_id, + None, )); } } @@ -19648,6 +19649,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, + downstream_user_channel_id, ) in pending_claims_to_replay { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we @@ -19663,7 +19665,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, - None, + downstream_user_channel_id, None, None, ); From b3b59e6dfa51b7e2f0fe62575c48693ffeb12332 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 15:18:58 -0500 Subject: [PATCH 06/21] Persist outbound channel info in inbound HTLCs We need these fields to generate a correct PaymentForwarded event if we need to claim this inbound HTLC backwards after restart and it's already been claimed and removed on the outbound edge. --- lightning/src/ln/channel.rs | 53 +++++++++++++++++++++--------- lightning/src/ln/channelmanager.rs | 42 ++++++++++++++++++----- 2 files changed, 72 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b12061bf118..37a0661de76 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -308,6 +308,32 @@ impl InboundHTLCState { } } +/// Information about the outbound hop for a forwarded HTLC. Useful for generating an accurate +/// [`Event::PaymentForwarded`] if we need to claim this HTLC post-restart. +/// +/// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded +#[derive(Debug, Copy, Clone)] +pub(super) struct OutboundHop { + /// The amount forwarded outbound. + pub(super) amt_msat: u64, + /// The outbound channel this HTLC was forwarded over. + pub(super) channel_id: ChannelId, + /// The next-hop recipient of this HTLC. + pub(super) node_id: PublicKey, + /// The outbound channel's funding outpoint. + pub(super) funding_txo: OutPoint, + /// The outbound channel's user channel ID. + pub(super) user_channel_id: u128, +} + +impl_writeable_tlv_based!(OutboundHop, { + (0, amt_msat, required), + (2, channel_id, required), + (4, node_id, required), + (6, funding_txo, required), + (8, user_channel_id, required), +}); + /// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. 
If /// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to /// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every @@ -328,11 +354,7 @@ enum InboundUpdateAdd { phantom_shared_secret: Option<[u8; 32]>, trampoline_shared_secret: Option<[u8; 32]>, blinded_failure: Option, - /// Useful for generating an accurate [`Event::PaymentForwarded`], if we need to claim this - /// HTLC post-restart. - /// - /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded - outbound_amt_msat: u64, + outbound_hop: OutboundHop, }, /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound /// committed HTLCs. @@ -346,7 +368,7 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, (2, Legacy) => {}, (4, Forwarded) => { (0, incoming_packet_shared_secret, required), - (2, outbound_amt_msat, required), + (2, outbound_hop, required), (4, phantom_shared_secret, option), (6, trampoline_shared_secret, option), (8, blinded_failure, option), @@ -7948,7 +7970,7 @@ where phantom_shared_secret, trampoline_shared_secret, blinded_failure, - outbound_amt_msat, + outbound_hop: OutboundHop { amt_msat, .. }, }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { @@ -7956,7 +7978,7 @@ where } // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards // post-restart, if it is missing in the outbound edge. - let hop_data = HTLCPreviousHopData { + let prev_hop_data = HTLCPreviousHopData { prev_outbound_scid_alias, user_channel_id: Some(user_channel_id), htlc_id: htlc.htlc_id, @@ -7969,7 +7991,7 @@ where counterparty_node_id: Some(counterparty_node_id), cltv_expiry: Some(htlc.cltv_expiry), }; - Some((htlc.payment_hash, hop_data, *outbound_amt_msat)) + Some((htlc.payment_hash, prev_hop_data, *amt_msat)) }, _ => None, }) @@ -8019,17 +8041,18 @@ where /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to /// persist its onion. 
pub(super) fn prune_inbound_htlc_onion( - &mut self, htlc_id: u64, hop_data: &HTLCPreviousHopData, outbound_amt_msat: u64, + &mut self, htlc_id: u64, prev_hop_data: &HTLCPreviousHopData, + outbound_hop_data: OutboundHop, ) { for htlc in self.context.pending_inbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { *update_add_htlc = InboundUpdateAdd::Forwarded { - incoming_packet_shared_secret: hop_data.incoming_packet_shared_secret, - phantom_shared_secret: hop_data.phantom_shared_secret, - trampoline_shared_secret: hop_data.trampoline_shared_secret, - blinded_failure: hop_data.blinded_failure, - outbound_amt_msat, + incoming_packet_shared_secret: prev_hop_data.incoming_packet_shared_secret, + phantom_shared_secret: prev_hop_data.phantom_shared_secret, + trampoline_shared_secret: prev_hop_data.trampoline_shared_secret, + blinded_failure: prev_hop_data.blinded_failure, + outbound_hop: outbound_hop_data, }; return; } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ac2af352e34..b7b39698bb4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, - ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, - WithChannelContext, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundHop, OutboundV1Channel, + PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, + UpdateFulfillCommitFetch, WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -1402,6 +1402,8 @@ enum PostMonitorUpdateChanResume { Unblocked { channel_id: ChannelId, counterparty_node_id: PublicKey, + funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, htlc_forwards: Option, @@ -9582,8 +9584,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// Handles actions which need to complete after a [`ChannelMonitorUpdate`] has been applied /// which can happen after the per-peer state lock has been dropped. fn post_monitor_update_unlock( - &self, channel_id: ChannelId, counterparty_node_id: PublicKey, - unbroadcasted_batch_funding_txid: Option, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, @@ -9660,7 +9662,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } - self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); + self.prune_persisted_inbound_htlc_onions( + channel_id, + counterparty_node_id, + funding_txo, + user_channel_id, + committed_outbound_htlc_sources, + ); } fn handle_monitor_update_completion_actions< @@ -10129,6 +10137,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id: chan_id, counterparty_node_id, + funding_txo: chan.funding_outpoint(), + user_channel_id: chan.context.get_user_id(), unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -10144,7 +10154,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound /// edge, we no longer need to persist the inbound edge's onion and can prune it here. fn prune_persisted_inbound_htlc_onions( - &self, committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + &self, outbound_channel_id: ChannelId, outbound_node_id: PublicKey, + outbound_funding_txo: OutPoint, outbound_user_channel_id: u128, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { let per_peer_state = self.per_peer_state.read().unwrap(); for (source, outbound_amt_msat) in committed_outbound_htlc_sources { @@ -10161,7 +10173,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) { - chan.prune_inbound_htlc_onion(source.htlc_id, &source, outbound_amt_msat); + chan.prune_inbound_htlc_onion( + source.htlc_id, + &source, + OutboundHop { + amt_msat: outbound_amt_msat, + channel_id: outbound_channel_id, + node_id: outbound_node_id, + funding_txo: outbound_funding_txo, + user_channel_id: outbound_user_channel_id, + }, + ); } } } @@ -10217,6 +10239,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -10228,6 +10252,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.post_monitor_update_unlock( channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, From 48010cbadf6723b679c90e840de0de36d8415265 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 16:33:14 -0500 Subject: [PATCH 07/21] Fix PaymentForwarded fields on restart claim Previously, we were spuriously using the upstream channel's info when we should've been using the downstream channel's. --- lightning/src/ln/channel.rs | 6 +++--- lightning/src/ln/channelmanager.rs | 25 ++++++++++++------------- lightning/src/ln/reload_tests.rs | 8 +------- 3 files changed, 16 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 37a0661de76..4a0d1175b8e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7942,7 +7942,7 @@ where /// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. pub(super) fn inbound_forwarded_htlcs( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell. let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7970,7 +7970,7 @@ where phantom_shared_secret, trampoline_shared_secret, blinded_failure, - outbound_hop: OutboundHop { amt_msat, .. 
}, + outbound_hop, }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { @@ -7991,7 +7991,7 @@ where counterparty_node_id: Some(counterparty_node_id), cltv_expiry: Some(htlc.cltv_expiry), }; - Some((htlc.payment_hash, prev_hop_data, *amt_msat)) + Some((htlc.payment_hash, prev_hop_data, *outbound_hop)) }, _ => None, }) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b7b39698bb4..897f10cf2f4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18610,11 +18610,11 @@ impl< // that it is handled. let mut already_forwarded_htlcs: HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, > = new_hash_map(); let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, >, prev_hop: &HTLCPreviousHopData, payment_hash: &PaymentHash| { @@ -18663,13 +18663,13 @@ impl< .or_insert_with(Vec::new) .push(update_add_htlc); } - for (payment_hash, prev_hop, outbound_amt_msat) in + for (payment_hash, prev_hop, next_hop) in funded_chan.inbound_forwarded_htlcs() { already_forwarded_htlcs .entry((prev_hop.channel_id, payment_hash)) .or_insert_with(Vec::new) - .push((prev_hop, outbound_amt_msat)); + .push((prev_hop, next_hop)); } } } @@ -19378,34 +19378,33 @@ impl< if let Some(forwarded_htlcs) = already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) { - for (prev_hop, outbound_amt_msat) in forwarded_htlcs { + for (prev_hop, next_hop) in forwarded_htlcs { let new_pending_claim = !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { - let counterparty_node_id = monitor.get_counterparty_node_id(); let is_downstream_closed = channel_manager .per_peer_state .read() .unwrap() - .get(&counterparty_node_id) + .get(&next_hop.node_id) .map_or(true, |peer_state_mtx| { !peer_state_mtx .lock() .unwrap() .channel_by_id - .contains_key(channel_id) + .contains_key(&next_hop.channel_id) }); pending_claims_to_replay.push(( HTLCSource::PreviousHopData(prev_hop), payment_preimage, - outbound_amt_msat, + next_hop.amt_msat, is_downstream_closed, - counterparty_node_id, - monitor.get_funding_txo(), - *channel_id, - None, + next_hop.node_id, + next_hop.funding_txo, + next_hop.channel_id, + Some(next_hop.user_channel_id), )); } } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c7e7175602d..42986bc41b1 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1958,14 +1958,8 @@ fn test_reload_node_with_preimage_in_monitor_claims_htlc() { ); // When the claim is reconstructed during reload, a PaymentForwarded event is generated. - // This event has next_user_channel_id as None since the outbound HTLC was already removed. // Fetching events triggers the pending monitor update (adding preimage) to be applied. - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match &events[0] { - Event::PaymentForwarded { total_fee_earned_msat: Some(1000), .. } => {}, - _ => panic!("Expected PaymentForwarded event"), - } + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors(&nodes[1], 1); // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. 
From 0d6dcc910558c558e763c02c17d609dc0410d835 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 16:51:46 -0500 Subject: [PATCH 08/21] Fix missing user_channel_id in PaymentForwarded Previously, if a forwarding node reloaded mid-HTLC-forward with a preimage in the outbound edge monitor and the outbound edge channel still open, and subsequently reclaimed the inbound HTLC backwards, the PaymentForwarded event would be missing the next_user_channel_id field. --- lightning/src/ln/chanmon_update_fail_tests.rs | 7 ++++++- lightning/src/ln/channelmanager.rs | 12 +++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 5a0c37bd61d..e5f6b7259ff 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3938,7 +3938,12 @@ fn do_test_durable_preimages_on_closed_channel( let evs = nodes[1].node.get_and_clear_pending_events(); assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 }); for ev in evs { - if let Event::PaymentForwarded { .. } = ev { + if let Event::PaymentForwarded { claim_from_onchain_tx, next_user_channel_id, .. } = ev { + if !claim_from_onchain_tx { + // If the outbound channel is still open, the `next_user_channel_id` should be available. + // This was previously broken. + assert!(next_user_channel_id.is_some()) + } } else { panic!(); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 897f10cf2f4..40342d72700 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18707,14 +18707,16 @@ impl< } } for (channel_id, monitor) in args.channel_monitors.iter() { - let mut is_channel_closed = true; + let (mut is_channel_closed, mut user_channel_id_opt) = (true, None); let counterparty_node_id = monitor.get_counterparty_node_id(); if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; - is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors && !is_channel_closed { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + is_channel_closed = false; + user_channel_id_opt = Some(chan.context().get_user_id()); + + if reconstruct_manager_from_monitors { if let Some(funded_chan) = chan.as_funded() { for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() { @@ -19014,7 +19016,7 @@ impl< Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id(), None)) + monitor.get_funding_txo(), monitor.channel_id(), user_channel_id_opt)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage From 5daf51c00764572f1cecd38c6f9dd16f69f172db Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 11 Feb 2026 16:14:19 -0500 Subject: [PATCH 09/21] Test restart-claim of two MPP holding cell HTLCs Test that if we restart and had two inbound MPP-part HTLCs received over the same channel in the holding cell prior to shutdown, and we lost the holding cell prior to restart, those HTLCs will still be claimed backwards. 
Test largely written by Claude --- lightning/src/ln/chanmon_update_fail_tests.rs | 3 +- lightning/src/ln/functional_test_utils.rs | 22 ++- lightning/src/ln/reload_tests.rs | 145 ++++++++++++++++++ 3 files changed, 161 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index e5f6b7259ff..b421114e911 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3519,8 +3519,9 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode .node .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed); check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + let (a, raa, holding_cell) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); + assert!(holding_cell.is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index d5a29785a94..d3902b26201 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2672,20 +2672,23 @@ pub fn commitment_signed_dance_through_cp_raa( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, includes_claim: bool, ) -> Option { - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(node_a, node_b, fail_backwards); + assert!(node_b_holding_cell_htlcs.is_empty()); node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors(node_a, if includes_claim { 0 } else { 1 }); extra_msg_option } /// Does the main logic in the commitment_signed dance. After the first `commitment_signed` has -/// been delivered, this method picks up and delivers the response `revoke_and_ack` and -/// `commitment_signed`, returning the recipient's `revoke_and_ack` and any extra message it may -/// have included. +/// been delivered, delivers the response `revoke_and_ack` and `commitment_signed`, and returns: +/// - The recipient's `revoke_and_ack` +/// - The recipient's extra message (if any) after handling the commitment_signed +/// - Any messages released from the initiator's holding cell after handling the `revoke_and_ack` +/// (e.g., a second HTLC on the same channel) pub fn do_main_commitment_signed_dance( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, -) -> (Option, msgs::RevokeAndACK) { +) -> (Option, msgs::RevokeAndACK, Vec) { let node_a_id = node_a.node.get_our_node_id(); let node_b_id = node_b.node.get_our_node_id(); @@ -2693,7 +2696,9 @@ pub fn do_main_commitment_signed_dance( check_added_monitors(&node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + // Handling the RAA may release HTLCs from node_b's holding cell (e.g., if multiple HTLCs + // were sent over the same channel and the second was queued behind the first). 
+ let node_b_holding_cell_htlcs = node_b.node.get_and_clear_pending_msg_events(); check_added_monitors(&node_b, 1); node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { @@ -2716,7 +2721,7 @@ pub fn do_main_commitment_signed_dance( assert!(node_a.node.get_and_clear_pending_events().is_empty()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); } - (extra_msg_option, bs_revoke_and_ack) + (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) } /// Runs the commitment_signed dance by delivering the commitment_signed and handling the @@ -2733,9 +2738,10 @@ pub fn commitment_signed_dance_return_raa( .node .handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed); check_added_monitors(&node_a, 1); - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(&node_a, &node_b, fail_backwards); assert!(extra_msg_option.is_none()); + assert!(node_b_holding_cell_htlcs.is_empty()); bs_revoke_and_ack } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 42986bc41b1..d1e34cb7c71 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -2082,3 +2082,148 @@ fn test_reload_node_without_preimage_fails_htlc() { // nodes[0] should now have received the failure and generate PaymentFailed. expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); } + +#[test] +fn test_reload_with_mpp_claims_on_same_channel() { + // Test that if a forwarding node has two HTLCs for the same MPP payment that were both + // irrevocably removed on the outbound edge via claim but are still forwarded-and-unresolved + // on the inbound edge, both HTLCs will be claimed backwards on restart. + // + // Topology: + // nodes[0] ----chan_0_1----> nodes[1] ----chan_1_2_a----> nodes[2] + // \----chan_1_2_b---/ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 2_000_000, 0); + let chan_1_2_a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + let chan_1_2_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2_a = chan_1_2_a.2; + let chan_id_1_2_b = chan_1_2_b.2; + + // Send an MPP payment large enough that the router must split it across both outbound channels. + // Each 1M sat outbound channel has 100M msat max in-flight, so 150M msat requires splitting. 
+ let amt_msat = 150_000_000; + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + + let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); + nodes[0].node.send_payment_with_route( + route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, + ).unwrap(); + check_added_monitors(&nodes[0], 1); + + // Forward the first HTLC nodes[0] -> nodes[1] -> nodes[2]. Note that the second HTLC is released + // from the holding cell during the first HTLC's commitment_signed_dance. + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event_1 = SendEvent::from_event(events.remove(0)); + + nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event_1.msgs[0]); + check_added_monitors(&nodes[1], 0); + nodes[1].node.handle_commitment_signed_batch_test(node_0_id, &payment_event_1.commitment_msg); + check_added_monitors(&nodes[1], 1); + let (_, raa, holding_cell_htlcs) = + do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + assert_eq!(holding_cell_htlcs.len(), 1); + let payment_event_2 = holding_cell_htlcs.into_iter().next().unwrap(); + nodes[1].node.handle_revoke_and_ack(node_0_id, &raa); + check_added_monitors(&nodes[1], 1); + + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev_1_2 = events.remove(0); + pass_along_path( + &nodes[1], &[&nodes[2]], amt_msat, payment_hash, Some(payment_secret), ev_1_2, false, None, + ); + + // Second HTLC: full path nodes[0] -> nodes[1] -> nodes[2]. PaymentClaimable expected at end. + pass_along_path( + &nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), + payment_event_2, true, None, + ); + + // Claim the HTLCs such that they're fully removed from the outbound edge, but disconnect + // node_0<>node_1 so that they can't be claimed backwards by node_1. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 2); + expect_payment_claimed!(nodes[2], payment_hash, amt_msat); + + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + let mut events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + for ev in events { + match ev { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. } => { + assert_eq!(*node_id, node_1_id); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); + }, + _ => panic!("Unexpected event"), + } + } + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + + // Clear the holding cell's claim entries on chan_0_1 before serialization. + // This simulates a crash where both HTLCs were fully removed on the outbound edges but are + // still present on the inbound edge without a resolution. 
+ nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_a_serialized = get_monitor!(nodes[1], chan_id_1_2_a).encode(); + let mon_1_2_b_serialized = get_monitor!(nodes[1], chan_id_1_2_b).encode(); + + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_a_serialized, &mon_1_2_b_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // When the claims are reconstructed during reload, PaymentForwarded events are regenerated. + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + // Fetching events triggers the pending monitor updates (one for each HTLC preimage) to be applied. + check_added_monitors(&nodes[1], 2); + + // Reconnect nodes[1] to nodes[0]. Both claims should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 2); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received both fulfills and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} From ab0ba65923158791a9afa196e275c4a9d375a673 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:30:16 -0500 Subject: [PATCH 10/21] Update RECONSTRUCT_HTLCS_FROM_CHANS_VERSION 5 -> 2 We previously had 5 due to wanting some flexibility to bump versions in between, but eventually concluded that wasn't necessary. --- lightning/src/ln/channelmanager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 40342d72700..5a4f569d879 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16616,7 +16616,7 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; // // If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and // acts accordingly. 
-const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 5; +const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 2; impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec), From a088338c743a65148332d70821dd9e71721bddd8 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Feb 2026 16:13:19 -0500 Subject: [PATCH 11/21] Update upgrade tests: 0.2- to 0.5 doesn't work XXX --- lightning-tests/Cargo.toml | 1 + lightning-tests/src/lib.rs | 1 - .../src/upgrade_downgrade_tests.rs | 276 ++++++++++++------ 3 files changed, 181 insertions(+), 97 deletions(-) diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 4e8d330089d..7830792b121 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_3 = { package = "lightning", git = "https://github.com/valentinewallace/rust-lightning", branch = "2026-02-dedup-htlc-fwd-data", features = ["_test_utils"] } lightning_0_2 = { package = "lightning", version = "0.2.0", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } diff --git a/lightning-tests/src/lib.rs b/lightning-tests/src/lib.rs index c028193d692..4249c957dde 100644 --- a/lightning-tests/src/lib.rs +++ b/lightning-tests/src/lib.rs @@ -1,4 +1,3 @@ -#[cfg_attr(test, macro_use)] extern crate lightning; #[cfg(all(test, not(taproot)))] diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 14b0a5c5822..42d6204ef35 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -45,20 +45,37 @@ use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _; use lightning_0_0_125::routing::router as router_0_0_125; use lightning_0_0_125::util::ser::Writeable as _; +use lightning_0_3::events::bump_transaction::sync::WalletSourceSync as _; +use lightning_0_3::events::{ + Event as Event_0_3, HTLCHandlingFailureType as HTLCHandlingFailureType_0_3, +}; +use lightning_0_3::expect_payment_claimable as expect_payment_claimable_0_3; +use lightning_0_3::get_event_msg as get_event_msg_0_3; +use lightning_0_3::ln::functional_test_utils as lightning_0_3_utils; +use lightning_0_3::ln::functional_test_utils::{ + PaymentFailedConditions as PaymentFailedConditions_0_3, ReconnectArgs as ReconnectArgs_0_3, + SendEvent as SendEvent_0_3, +}; +use lightning_0_3::ln::funding::SpliceContribution as SpliceContribution_0_3; +use lightning_0_3::ln::msgs::BaseMessageHandler as _; +use lightning_0_3::ln::msgs::ChannelMessageHandler as _; +use lightning_0_3::ln::msgs::MessageSendEvent as MessageSendEvent_0_3; +use lightning_0_3::ln::splicing_tests::lock_splice as lock_splice_0_3; +use lightning_0_3::ln::splicing_tests::splice_channel as splice_channel_0_3; +use lightning_0_3::ln::types::ChannelId as ChannelId_0_3; +use lightning_0_3::reload_node as reload_node_0_3; +use lightning_0_3::types::payment::{ + PaymentHash as PaymentHash_0_3, PaymentPreimage as PaymentPreimage_0_3, + PaymentSecret as PaymentSecret_0_3, +}; + use lightning::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER}; -use 
lightning::events::bump_transaction::sync::WalletSourceSync; -use lightning::events::{ClosureReason, Event, HTLCHandlingFailureType}; -use lightning::ln::functional_test_utils::*; -use lightning::ln::funding::SpliceContribution; -use lightning::ln::msgs::BaseMessageHandler as _; -use lightning::ln::msgs::ChannelMessageHandler as _; -use lightning::ln::msgs::MessageSendEvent; -use lightning::ln::splicing_tests::*; -use lightning::ln::types::ChannelId; +use lightning::check_spends; +use lightning::events::{ClosureReason, Event}; +use lightning::ln::functional_test_utils as lightning_local_utils; +use lightning::reload_node; use lightning::sign::OutputSpender; -use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; - use bitcoin::script::Builder; use bitcoin::secp256k1::Secp256k1; use bitcoin::{opcodes, Amount, TxOut}; @@ -68,7 +85,7 @@ use std::sync::Arc; #[test] fn simple_upgrade() { // Tests a simple case of upgrading from LDK 0.1 with a pending payment - let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage); + let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage_bytes); { let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(2); let node_cfgs = lightning_0_1_utils::create_node_cfgs(2, &chanmon_cfgs); @@ -79,7 +96,7 @@ fn simple_upgrade() { let payment_preimage = lightning_0_1_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - preimage = PaymentPreimage(payment_preimage.0 .0); + preimage_bytes = payment_preimage.0 .0; node_a_ser = nodes[0].node.encode(); node_b_ser = nodes[1].node.encode(); @@ -89,26 +106,43 @@ fn simple_upgrade() { // Create a dummy node to reload over with the 0.1 state - let mut chanmon_cfgs = create_chanmon_cfgs(2); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(2); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(2, &chanmon_cfgs); let (persister_a, persister_b, chain_mon_a, chain_mon_b); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = lightning_0_3_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); let (node_a, node_b); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(2, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); - reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); - - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - - claim_payment(&nodes[0], &[&nodes[1]], preimage); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + reload_node_0_3!( + nodes[1], + config, + &node_b_ser, + &[&mon_b_ser], + persister_b, + chain_mon_b, + node_b + ); + + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + + let preimage = PaymentPreimage_0_3(preimage_bytes); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1]], preimage); } #[test] @@ -228,7 +262,7 @@ fn test_125_dangling_post_update_actions() { // Create a dummy node 
to reload over with the 0.0.125 state - let mut chanmon_cfgs = create_chanmon_cfgs(4); + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(4); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; @@ -236,14 +270,15 @@ fn test_125_dangling_post_update_actions() { chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[3].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_cfgs = lightning_local_utils::create_node_cfgs(4, &chanmon_cfgs); let (persister, chain_mon); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let node; - let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_local_utils::create_network(4, &node_cfgs, &node_chanmgrs); // Finally, reload the node in the latest LDK. This previously failed. - let config = test_default_channel_config(); + let config = lightning_local_utils::test_default_channel_config(); reload_node!(nodes[3], config, &node_d_ser, &[&mon_ser], persister, chain_mon, node); } @@ -283,14 +318,14 @@ fn test_0_1_legacy_remote_key_derivation() { } // Create a dummy node to reload over with the 0.1 state - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(2); + let node_cfgs = lightning_local_utils::create_node_cfgs(2, &chanmon_cfgs); let (persister_a, persister_b, chain_mon_a, chain_mon_b); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = lightning_local_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); let (node_a, node_b); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_local_utils::create_network(2, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_local_utils::test_default_channel_config(); let a_mons = &[&mon_a_ser[..]]; reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); @@ -299,13 +334,13 @@ fn test_0_1_legacy_remote_key_derivation() { let node_b_id = nodes[1].node.get_our_node_id(); - mine_transaction(&nodes[0], &commitment_tx[0]); + lightning_local_utils::mine_transaction(&nodes[0], &commitment_tx[0]); let reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); - check_added_monitors(&nodes[0], 1); - check_closed_broadcast(&nodes[0], 1, false); + lightning_local_utils::check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); + lightning_local_utils::check_added_monitors(&nodes[0], 1); + lightning_local_utils::check_closed_broadcast(&nodes[0], 1, false); - connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + lightning_local_utils::connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let mut spendable_event = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(spendable_event.len(), 1); if let Event::SpendableOutputs { outputs, channel_id: ev_id, counterparty_node_id: _ } = @@ -422,7 +457,7 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { } // Create a dummy node to reload 
over with the 0.1 state - let mut chanmon_cfgs = create_chanmon_cfgs(3); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; @@ -433,73 +468,105 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { chanmon_cfgs[1].tx_broadcaster.blocks = node_b_blocks; chanmon_cfgs[2].tx_broadcaster.blocks = node_c_blocks; - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let (node_a, node_b, node_c); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_1_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; - reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); let c_mons = &[&mon_c_1_ser[..]]; - reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); reconnect_b_c_args.send_channel_ready = (true, true); reconnect_b_c_args.send_announcement_sigs = (true, true); - reconnect_nodes(reconnect_b_c_args); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); - let contribution = SpliceContribution::splice_out(vec![TxOut { + let contribution = SpliceContribution_0_3::splice_out(vec![TxOut { value: Amount::from_sat(1_000), script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), }]); - let splice_tx = splice_channel(&nodes[0], &nodes[1], ChannelId(chan_id_bytes_a), contribution); + let splice_tx = + splice_channel_0_3(&nodes[0], &nodes[1], ChannelId_0_3(chan_id_bytes_a), contribution); for node in nodes.iter() { - mine_transaction(node, &splice_tx); - connect_blocks(node, ANTI_REORG_DELAY - 1); + lightning_0_3_utils::mine_transaction(node, &splice_tx); + lightning_0_3_utils::connect_blocks(node, ANTI_REORG_DELAY - 1); } - let splice_locked = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_b_id); - lock_splice(&nodes[0], &nodes[1], &splice_locked, false); + let splice_locked = + get_event_msg_0_3!(nodes[0], MessageSendEvent_0_3::SendSpliceLocked, node_b_id); + lock_splice_0_3(&nodes[0], &nodes[1], &splice_locked, false); for node in nodes.iter() { - connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL - ANTI_REORG_DELAY); + 
lightning_0_3_utils::connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL - ANTI_REORG_DELAY); } // Now release the HTLC to be failed back to node A nodes[1].node.process_pending_htlc_forwards(); - let pay_secret = PaymentSecret(payment_secret_bytes); - let pay_hash = PaymentHash(payment_hash_bytes); - let pay_preimage = PaymentPreimage(payment_preimage_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); if fail_htlc { - let failure = HTLCHandlingFailureType::Forward { + let failure = HTLCHandlingFailureType_0_3::Forward { node_id: Some(node_c_id), - channel_id: ChannelId(chan_id_bytes_b), + channel_id: ChannelId_0_3(chan_id_bytes_b), }; - expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[failure]); - check_added_monitors(&nodes[1], 1); + lightning_0_3_utils::expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], + &[failure], + ); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + let updates = lightning_0_3_utils::get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); - let conditions = PaymentFailedConditions::new(); - expect_payment_failed_conditions(&nodes[0], pay_hash, false, conditions); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[0], + &nodes[1], + &updates.commitment_signed, + false, + false, + ); + let conditions = PaymentFailedConditions_0_3::new(); + lightning_0_3_utils::expect_payment_failed_conditions( + &nodes[0], pay_hash, false, conditions, + ); } else { - check_added_monitors(&nodes[1], 1); - let forward_event = SendEvent::from_node(&nodes[1]); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); let commitment = &forward_event.commitment_msg; - do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); - expect_and_process_pending_htlcs(&nodes[2], false); - expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } } @@ -634,32 +701,49 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { } // Create a dummy node to reload over with the 0.2 state - let mut chanmon_cfgs = create_chanmon_cfgs(3); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); - let 
node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let (node_a, node_b, node_c); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_1_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; - reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); let c_mons = &[&mon_c_1_ser[..]]; - reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); reconnect_b_c_args.send_channel_ready = (true, true); reconnect_b_c_args.send_announcement_sigs = (true, true); - reconnect_nodes(reconnect_b_c_args); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); // Now release the HTLC from node_b to node_c, to be claimed back to node_a nodes[1].node.process_pending_htlc_forwards(); @@ -668,7 +752,7 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); let (intercept_id, expected_outbound_amt_msat) = match events[0] { - Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { + Event_0_3::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. 
} => { (intercept_id, expected_outbound_amount_msat) }, _ => panic!(), @@ -677,7 +761,7 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { .node .forward_intercepted_htlc( intercept_id, - &ChannelId(chan_id_bytes_b_c), + &ChannelId_0_3(chan_id_bytes_b_c), nodes[2].node.get_our_node_id(), expected_outbound_amt_msat, ) @@ -685,17 +769,17 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { nodes[1].node.process_pending_htlc_forwards(); } - let pay_secret = PaymentSecret(payment_secret_bytes); - let pay_hash = PaymentHash(payment_hash_bytes); - let pay_preimage = PaymentPreimage(payment_preimage_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); - check_added_monitors(&nodes[1], 1); - let forward_event = SendEvent::from_node(&nodes[1]); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); let commitment = &forward_event.commitment_msg; - do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + lightning_0_3_utils::do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); - expect_and_process_pending_htlcs(&nodes[2], false); - expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } From 0e8b3a4053ce82e075d0761d65af9ebe5b75e00d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 16 Jan 2026 17:38:57 -0500 Subject: [PATCH 12/21] Elide legacy manager pending HTLC maps on read/write In LDK 0.3 we added support for reconstructing the ChannelManager's pending forward HTLCs maps from channel data as part of removing the requirement to regularly persist the manager, but til now we still would write/read those maps within the manager to maintain compat with 0.2-. Also 0.3 added a new field to Channel that allowed the map reconstruction. Now that a few versions have passed we have more confidence that the new field is being written, here we deprecate persistence of the old legacy maps and only attempt to read them if the manager serialized version indicates that they were written. Upcoming commits will ensure we error if the new field is missing. 
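Roughly, the read-side gating added here looks like the following sketch (abridged and with
approximate structure; the real logic lives alongside `ChannelManagerData` in channelmanager.rs):

    // Sketch only: the legacy maps are drained from the stream solely when the
    // serialized version predates reconstruction support, then discarded.
    if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION {
        // read the inline forward_htlcs map written by 0.4- and ignore it
    }
    // TLV types 2 (pending_intercepted_htlcs) and 14 (decode_update_add_htlcs)
    // are still parsed for compatibility but no longer used; the pending HTLC
    // set is instead rebuilt from each FundedChannel's committed inbound HTLCs.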
XXX clean up claude tests --- .../src/upgrade_downgrade_tests.rs | 640 ++++++++++++++++++ lightning/src/ln/channelmanager.rs | 358 +++------- .../upgrade-0.2-pending-htlc.txt | 9 + 3 files changed, 761 insertions(+), 246 deletions(-) create mode 100644 pending_changelog/upgrade-0.2-pending-htlc.txt diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 42d6204ef35..2c9720f47e1 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -51,6 +51,8 @@ use lightning_0_3::events::{ }; use lightning_0_3::expect_payment_claimable as expect_payment_claimable_0_3; use lightning_0_3::get_event_msg as get_event_msg_0_3; +use lightning_0_3::get_monitor as get_monitor_0_3; +use lightning_0_3::ln::channelmanager::PaymentId as PaymentId_0_3; use lightning_0_3::ln::functional_test_utils as lightning_0_3_utils; use lightning_0_3::ln::functional_test_utils::{ PaymentFailedConditions as PaymentFailedConditions_0_3, ReconnectArgs as ReconnectArgs_0_3, @@ -60,21 +62,29 @@ use lightning_0_3::ln::funding::SpliceContribution as SpliceContribution_0_3; use lightning_0_3::ln::msgs::BaseMessageHandler as _; use lightning_0_3::ln::msgs::ChannelMessageHandler as _; use lightning_0_3::ln::msgs::MessageSendEvent as MessageSendEvent_0_3; +use lightning_0_3::ln::outbound_payment::RecipientOnionFields as RecipientOnionFields_0_3; use lightning_0_3::ln::splicing_tests::lock_splice as lock_splice_0_3; use lightning_0_3::ln::splicing_tests::splice_channel as splice_channel_0_3; use lightning_0_3::ln::types::ChannelId as ChannelId_0_3; use lightning_0_3::reload_node as reload_node_0_3; +use lightning_0_3::routing::router as router_0_3; use lightning_0_3::types::payment::{ PaymentHash as PaymentHash_0_3, PaymentPreimage as PaymentPreimage_0_3, PaymentSecret as PaymentSecret_0_3, }; +use lightning_0_3::util::ser::Writeable as _; use lightning::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER}; use lightning::check_spends; use lightning::events::{ClosureReason, Event}; +use lightning::expect_payment_claimable; use lightning::ln::functional_test_utils as lightning_local_utils; +use lightning::ln::functional_test_utils::{ReconnectArgs, SendEvent}; +use lightning::ln::msgs::ChannelMessageHandler as _; use lightning::reload_node; use lightning::sign::OutputSpender; +use lightning::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::util::ser::Writeable as _; use bitcoin::script::Builder; use bitcoin::secp256k1::Secp256k1; @@ -783,3 +793,633 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } + +#[test] +fn test_0_3_pending_forward_upgrade() { + // Tests upgrading from 0.3 with a pending HTLC forward. 
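+	// LDK 0.3 is the first release that writes the per-channel HTLC data that 0.5+ relies on,
+	// so this path must rebuild the pending forward purely from Channel{Monitor} state.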
+ // Phase 1: Create state in 0.3 with a pending forward (HTLC locked on inbound edge of node B) + // Phase 2: Reload with local lightning and complete the payment + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, _node_c_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + + { + let chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + _node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_3_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_3_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_3_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_3_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_3::PaymentParameters::from_node_id( + _node_c_id, + lightning_0_3_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_3::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_3_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_3::secret_only(secret); + let id = PaymentId_0_3(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_3_utils::check_added_monitors(&nodes[0], 1); + let send_event = SendEvent_0_3::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[1], + &nodes[0], + &send_event.commitment_msg, + false, + false, + ); + // Process the pending update_add_htlcs but don't forward yet + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(events.is_empty()); + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_3!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_3!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_3!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_3!(nodes[2], chan_id_b).encode(); + } + + // Create a dummy node to reload over with the 0.3 state + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_local_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_local_utils::reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + + let pay_secret = PaymentSecret(payment_secret_bytes); + let pay_hash = PaymentHash(payment_hash_bytes); + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + + lightning_local_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_local_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); + + lightning_local_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_local_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} + +#[test] +fn test_0_2_pending_forward_upgrade_fails() { + // Tests that upgrading directly from 0.2 to local lightning fails with DecodeError::InvalidValue + // when there's a pending HTLC forward, because in 0.5 we 
started requiring new pending_htlc + // fields that started being written in 0.3. + // XXX update this + use lightning::ln::channelmanager::ChannelManagerReadArgs; + use lightning::ln::msgs::DecodeError; + use lightning::util::ser::ReadableArgs; + + let (node_b_ser, mon_b_1_ser, mon_b_2_ser); + + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + + // Send HTLC from node_a to node_c, hold at node_b (don't call process_pending_htlc_forwards) + let node_a_id = nodes[0].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
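+		// This leaves node_b holding a 0.2-era pending forward, exactly the state the new
+		// pending_changelog entry says cannot be read directly by 0.5.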
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + + // Process the pending HTLC to create a pending forward (but don't actually forward it) + nodes[1].node.test_process_pending_update_add_htlcs(); + + // Serialize node_b with the pending forward + node_b_ser = nodes[1].node.encode(); + mon_b_1_ser = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + } + + // Try to reload using local lightning - this should fail with DecodeError::InvalidValue + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(1); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(1, &chanmon_cfgs); + let node_chanmgrs = lightning_local_utils::create_node_chanmgrs(1, &node_cfgs, &[None]); + let nodes = lightning_local_utils::create_network(1, &node_cfgs, &node_chanmgrs); + + // Read the monitors first + use lightning::util::test_channel_signer::TestChannelSigner; + let mut monitor_read_1 = &mon_b_1_ser[..]; + let (_, monitor_1) = <( + bitcoin::BlockHash, + lightning::chain::channelmonitor::ChannelMonitor, + )>::read( + &mut monitor_read_1, (nodes[0].keys_manager, nodes[0].keys_manager) + ) + .unwrap(); + + let mut monitor_read_2 = &mon_b_2_ser[..]; + let (_, monitor_2) = <( + bitcoin::BlockHash, + lightning::chain::channelmonitor::ChannelMonitor, + )>::read( + &mut monitor_read_2, (nodes[0].keys_manager, nodes[0].keys_manager) + ) + .unwrap(); + + // Try to read the ChannelManager - this should fail + use lightning::util::hash_tables::new_hash_map; + let mut channel_monitors = new_hash_map(); + channel_monitors.insert(monitor_1.channel_id(), &monitor_1); + channel_monitors.insert(monitor_2.channel_id(), &monitor_2); + + let config = lightning_local_utils::test_default_channel_config(); + let mut node_read = &node_b_ser[..]; + let read_args = ChannelManagerReadArgs { + config, + entropy_source: nodes[0].keys_manager, + node_signer: nodes[0].keys_manager, + signer_provider: nodes[0].keys_manager, + fee_estimator: nodes[0].fee_estimator, + router: nodes[0].router, + message_router: nodes[0].message_router, + chain_monitor: nodes[0].chain_monitor, + tx_broadcaster: nodes[0].tx_broadcaster, + logger: nodes[0].logger, + channel_monitors, + }; + + let result = + <(bitcoin::BlockHash, lightning::ln::functional_test_utils::TestChannelManager)>::read( + &mut node_read, + read_args, + ); + + assert!(matches!(result, Err(DecodeError::InvalidValue))); +} + +#[test] +fn test_0_2_to_0_3_to_local_pending_forward_upgrade() { + // Tests that upgrading from 0.2 to 0.3 to local with a pending HTLC forward works. + // The key is that 0.3 can read 0.2's format and re-serialize it in the new format + // that local can then read. 
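+	// Per the pending_changelog entry added in this commit, the HTLC's outbound commitment must
+	// complete while running 0.3 before the final upgrade, which Phase 2 below takes care of.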
+ let (node_a_ser_0_3, node_b_ser_0_3, node_c_ser_0_3); + let (mon_a_1_ser_0_3, mon_b_1_ser_0_3, mon_b_2_ser_0_3, mon_c_1_ser_0_3); + let (node_a_id, node_b_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (chan_id_a_bytes, chan_id_b_bytes); + + // Phase 1: Create network on 0.2 with pending HTLC forward + let (node_a_ser_0_2, node_b_ser_0_2, node_c_ser_0_2); + let (mon_a_1_ser_0_2, mon_b_1_ser_0_2, mon_b_2_ser_0_2, mon_c_1_ser_0_2); + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_a_bytes = chan_id_a.0; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_b_bytes = chan_id_b.0; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_2_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + // Process the pending update_add_htlcs to create pending forward state + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(events.is_empty()); + + node_a_ser_0_2 = nodes[0].node.encode(); + node_b_ser_0_2 = nodes[1].node.encode(); + node_c_ser_0_2 = nodes[2].node.encode(); + mon_a_1_ser_0_2 = get_monitor_0_2!(nodes[0], chan_id_a).encode(); + mon_b_1_ser_0_2 = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser_0_2 = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + mon_c_1_ser_0_2 = get_monitor_0_2!(nodes[2], chan_id_b).encode(); + } + + // Phase 2: Reload on 0.3, forward the HTLC (but don't claim), and re-serialize + { + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_0_3_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser_0_2[..]]; + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser_0_2, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser_0_2[..], &mon_b_2_ser_0_2[..]]; + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser_0_2, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser_0_2[..]]; + reload_node_0_3!( + nodes[2], + config, + &node_c_ser_0_2, + c_mons, + persister_c, + chain_mon_c, + node_c + ); + + // Reconnect nodes after reload + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); + + // Forward the HTLC from node_b to node_c (but don't claim yet) + nodes[1].node.process_pending_htlc_forwards(); + + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + + // Re-serialize in 0.3 format - now the HTLC has been forwarded (not just pending) + node_a_ser_0_3 = nodes[0].node.encode(); + node_b_ser_0_3 = nodes[1].node.encode(); + node_c_ser_0_3 = nodes[2].node.encode(); + + let chan_id_a = ChannelId_0_3(chan_id_a_bytes); + let chan_id_b = ChannelId_0_3(chan_id_b_bytes); + + 
mon_a_1_ser_0_3 = get_monitor_0_3!(nodes[0], chan_id_a).encode(); + mon_b_1_ser_0_3 = get_monitor_0_3!(nodes[1], chan_id_a).encode(); + mon_b_2_ser_0_3 = get_monitor_0_3!(nodes[1], chan_id_b).encode(); + mon_c_1_ser_0_3 = get_monitor_0_3!(nodes[2], chan_id_b).encode(); + } + + // Phase 3: Reload on local and claim the payment (HTLC already forwarded in Phase 2) + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_local_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser_0_3[..]]; + reload_node!( + nodes[0], + config.clone(), + &node_a_ser_0_3, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser_0_3[..], &mon_b_2_ser_0_3[..]]; + reload_node!( + nodes[1], + config.clone(), + &node_b_ser_0_3, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser_0_3[..]]; + reload_node!(nodes[2], config, &node_c_ser_0_3, c_mons, persister_c, chain_mon_c, node_c); + + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); + + // The HTLC was already forwarded and claimable event generated in Phase 2. + // After reload, just claim the payment - the HTLC is already locked on node C. + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + lightning_local_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} + +#[test] +fn test_local_to_0_3_pending_forward_downgrade() { + // Tests downgrading from local lightning to 0.3 with a pending HTLC forward works. 
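+	// The downgrade works because 0.3 already supports rebuilding its pending HTLC maps from
+	// channel data, so it does not need the legacy ChannelManager maps that 0.5 stops writing.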
+ // Phase 1: Create state in local lightning with a pending forward + // Phase 2: Reload with 0.3 and complete the payment + use lightning::ln::types::ChannelId; + + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (chan_id_a_bytes, chan_id_b_bytes); + + // Phase 1: Create network on local lightning with pending HTLC forward + { + let chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_id_a = lightning_local_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_a_bytes = chan_id_a.0; + + let chan_id_b = lightning_local_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_b_bytes = chan_id_b.0; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_local_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_local_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + use lightning::routing::router as local_router; + let pay_params = local_router::PaymentParameters::from_node_id( + node_c_id, + lightning_local_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + local_router::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_local_utils::get_route(&nodes[0], &route_params).unwrap(); + + use lightning::ln::channelmanager::PaymentId as LocalPaymentId; + use lightning::ln::outbound_payment::RecipientOnionFields as LocalRecipientOnionFields; + let onion = LocalRecipientOnionFields::secret_only(secret); + let id = LocalPaymentId(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_local_utils::check_added_monitors(&nodes[0], 1); + let send_event = SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + lightning_local_utils::do_commitment_signed_dance( + &nodes[1], + &nodes[0], + &send_event.commitment_msg, + false, + false, + ); + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = lightning::get_monitor!(nodes[0], ChannelId(chan_id_a_bytes)).encode(); + mon_b_1_ser = lightning::get_monitor!(nodes[1], ChannelId(chan_id_a_bytes)).encode(); + mon_b_2_ser = lightning::get_monitor!(nodes[1], ChannelId(chan_id_b_bytes)).encode(); + mon_c_1_ser = lightning::get_monitor!(nodes[2], ChannelId(chan_id_b_bytes)).encode(); + } + + // Phase 2: Reload on 0.3 and complete the payment + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_0_3_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5a4f569d879..4c4b49827f6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16604,14 +16604,15 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features } -const SERIALIZATION_VERSION: u8 = 1; +// Bumped in 0.5 because we stop 
reading/writing legacy ChannelManager pending HTLC maps and +// instead reconstruct them from `Channel{Monitor}` data as part of removing the requirement to +// regularly persist the `ChannelManager`. +const SERIALIZATION_VERSION: u8 = 2; const MIN_SERIALIZATION_VERSION: u8 = 1; -// We plan to start writing this version in 0.5. -// -// LDK 0.5+ will reconstruct the set of pending HTLCs from `Channel{Monitor}` data that started +// LDK 0.5+ reconstructs the set of pending HTLCs from `Channel{Monitor}` data that started // being written in 0.3, ignoring legacy `ChannelManager` HTLC maps on read and not writing them. -// LDK 0.5+ will automatically fail to read if the pending HTLC set cannot be reconstructed, i.e. +// LDK 0.5+ automatically fails to read if the pending HTLC set cannot be reconstructed, i.e. // if we were last written with pending HTLCs on 0.2- or if the new 0.3+ fields are missing. // // If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and @@ -17084,24 +17085,6 @@ impl< } } - { - let forward_htlcs = self.forward_htlcs.lock().unwrap(); - (forward_htlcs.len() as u64).write(writer)?; - for (short_channel_id, pending_forwards) in forward_htlcs.iter() { - short_channel_id.write(writer)?; - (pending_forwards.len() as u64).write(writer)?; - for forward in pending_forwards { - forward.write(writer)?; - } - } - } - - let mut decode_update_add_htlcs_opt = None; - let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); - if !decode_update_add_htlcs.is_empty() { - decode_update_add_htlcs_opt = Some(decode_update_add_htlcs); - } - let claimable_payments = self.claimable_payments.lock().unwrap(); let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap(); @@ -17148,8 +17131,6 @@ impl< } } - let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); - // Since some FundingNegotiation variants are not persisted, any splice in such state must // be failed upon reload. However, as the necessary information for the SpliceFailed event // is not persisted, the event itself needs to be persisted even though it hasn't been @@ -17245,11 +17226,6 @@ impl< } } - let mut pending_intercepted_htlcs = None; - if our_pending_intercepts.len() != 0 { - pending_intercepted_htlcs = Some(our_pending_intercepts); - } - let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments); if pending_claiming_payments.as_ref().unwrap().is_empty() { // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments @@ -17272,7 +17248,7 @@ impl< write_tlv_fields!(writer, { (1, pending_outbound_payments_no_retry, required), - (2, pending_intercepted_htlcs, option), + // 2 was previously used for the pending_intercepted_htlcs map. (3, pending_outbound_payments, required), (4, pending_claiming_payments, option), (5, self.our_network_pubkey, required), @@ -17283,7 +17259,7 @@ impl< (10, legacy_in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_opt, option), + // 14 was previously used for the decode_update_add_htlcs map. 
(15, self.inbound_payment_id_secret, required), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), @@ -17363,12 +17339,6 @@ pub(super) struct ChannelManagerData { in_flight_monitor_updates: HashMap<(PublicKey, ChannelId), Vec>, peer_storage_dir: Vec<(PublicKey, Vec)>, async_receive_offer_cache: AsyncReceiveOfferCache, - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. - forward_htlcs_legacy: HashMap>, - pending_intercepted_htlcs_legacy: HashMap, - decode_update_add_htlcs_legacy: HashMap>, // The `ChannelManager` version that was written. version: u8, } @@ -17416,26 +17386,24 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> channels.push(channel); } - let forward_htlcs_legacy: HashMap> = - if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { - let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut fwds = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); - for _ in 0..forward_htlcs_count { - let short_channel_id = Readable::read(reader)?; - let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min( - pending_forwards_count as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..pending_forwards_count { - pending_forwards.push(Readable::read(reader)?); - } - fwds.insert(short_channel_id, pending_forwards); + if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { + let forward_htlcs_count: u64 = Readable::read(reader)?; + // This map is written if version = 1 (LDK versions 0.4-) only. + let mut _forward_htlcs_legacy: HashMap> = + hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + for _ in 0..forward_htlcs_count { + let short_channel_id = Readable::read(reader)?; + let pending_forwards_count: u64 = Readable::read(reader)?; + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..pending_forwards_count { + pending_forwards.push(Readable::read(reader)?); } - fwds - } else { - new_hash_map() - }; + _forward_htlcs_legacy.insert(short_channel_id, pending_forwards); + } + } let claimable_htlcs_count: u64 = Readable::read(reader)?; let mut claimable_htlcs_list = @@ -17521,9 +17489,13 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> }; } - let mut pending_intercepted_htlcs_legacy: Option> = - None; - let mut decode_update_add_htlcs_legacy: Option>> = + // Read for compatibility with 0.4- but no longer used in 0.5+, instead these maps are + // reconstructed during runtime from decode_update_add_htlcs, via the next call to + // process_pending_htlc_forwards. + let mut _pending_intercepted_htlcs_legacy: Option< + HashMap, + > = None; + let mut _decode_update_add_htlcs_legacy: Option>> = None; // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
let mut pending_outbound_payments_no_retry: Option>> = @@ -17551,7 +17523,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs_legacy, option), + (2, _pending_intercepted_htlcs_legacy, option), (3, pending_outbound_payments, option), (4, pending_claiming_payments, option), (5, received_network_pubkey, option), @@ -17562,7 +17534,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_legacy, option), + (14, _decode_update_add_htlcs_legacy, option), (15, inbound_payment_id_secret, option), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), @@ -17695,13 +17667,10 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> best_block_height, best_block_hash, channels, - forward_htlcs_legacy, claimable_payments, peer_init_features, pending_events_read, highest_seen_timestamp, - pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy - .unwrap_or_else(new_hash_map), pending_outbound_payments, pending_claiming_payments: pending_claiming_payments.unwrap_or_else(new_hash_map), received_network_pubkey, @@ -17709,8 +17678,6 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> .unwrap_or_else(Vec::new), fake_scid_rand_bytes, probing_cookie_secret, - decode_update_add_htlcs_legacy: decode_update_add_htlcs_legacy - .unwrap_or_else(new_hash_map), inbound_payment_id_secret, in_flight_monitor_updates: in_flight_monitor_updates.unwrap_or_default(), peer_storage_dir: peer_storage_dir.unwrap_or_default(), @@ -18001,19 +17968,16 @@ impl< best_block_height, best_block_hash, channels, - mut forward_htlcs_legacy, claimable_payments, peer_init_features, mut pending_events_read, highest_seen_timestamp, - mut pending_intercepted_htlcs_legacy, pending_outbound_payments, pending_claiming_payments, received_network_pubkey, monitor_update_blocked_actions_per_peer, mut fake_scid_rand_bytes, mut probing_cookie_secret, - mut decode_update_add_htlcs_legacy, mut inbound_payment_id_secret, mut in_flight_monitor_updates, peer_storage_dir, @@ -18564,40 +18528,6 @@ impl< pending_background_events.push(new_event); } - // In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and - // persist that state, relying on it being up-to-date on restart. Newer versions are moving - // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead - // reconstruct HTLC/payment state based on `Channel{Monitor}` data if - // `reconstruct_manager_from_monitors` is set below. Currently we set in tests randomly to - // ensure the legacy codepaths also have test coverage. 
- #[cfg(not(test))] - let reconstruct_manager_from_monitors = _version >= RECONSTRUCT_HTLCS_FROM_CHANS_VERSION; - #[cfg(test)] - let reconstruct_manager_from_monitors = - args.reconstruct_manager_from_monitors.unwrap_or_else(|| { - use core::hash::{BuildHasher, Hasher}; - - match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { - Ok(val) => match val.as_str() { - "1" => true, - "0" => false, - _ => panic!( - "LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", - val - ), - }, - Err(_) => { - let rand_val = - std::collections::hash_map::RandomState::new().build_hasher().finish(); - if rand_val % 2 == 0 { - true - } else { - false - } - }, - } - }); - // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. We put them here and do so after we // have a fully-constructed `ChannelManager` at the end. @@ -18646,31 +18576,29 @@ impl< let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { - if let Some(funded_chan) = chan.as_funded() { - // Legacy HTLCs are from pre-LDK 0.3 and cannot be reconstructed. - if funded_chan.has_legacy_inbound_htlcs() { - return Err(DecodeError::InvalidValue); - } - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel` as part of removing the requirement to regularly persist the - // `ChannelManager`. - let scid_alias = funded_chan.context.outbound_scid_alias(); - for update_add_htlc in funded_chan.inbound_htlcs_pending_decode() { - decode_update_add_htlcs - .entry(scid_alias) - .or_insert_with(Vec::new) - .push(update_add_htlc); - } - for (payment_hash, prev_hop, next_hop) in - funded_chan.inbound_forwarded_htlcs() - { - already_forwarded_htlcs - .entry((prev_hop.channel_id, payment_hash)) - .or_insert_with(Vec::new) - .push((prev_hop, next_hop)); - } + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + // Legacy HTLCs are from pre-LDK 0.3 and cannot be reconstructed. + if funded_chan.has_legacy_inbound_htlcs() { + return Err(DecodeError::InvalidValue); + } + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. 
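+					// Entries are keyed by the channel's outbound SCID alias and are picked up by
+					// the next call to `process_pending_htlc_forwards` after startup.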
+ let scid_alias = funded_chan.context.outbound_scid_alias(); + for update_add_htlc in funded_chan.inbound_htlcs_pending_decode() { + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + } + for (payment_hash, prev_hop, next_hop) in + funded_chan.inbound_forwarded_htlcs() + { + already_forwarded_htlcs + .entry((prev_hop.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((prev_hop, next_hop)); } } } @@ -18716,22 +18644,19 @@ impl< is_channel_closed = false; user_channel_id_opt = Some(chan.context().get_user_id()); - if reconstruct_manager_from_monitors { - if let Some(funded_chan) = chan.as_funded() { - for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() - { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop, - "HTLC already forwarded to the outbound edge", - &args.logger, - ); - prune_forwarded_htlc( - &mut already_forwarded_htlcs, - &prev_hop, - &payment_hash, - ); - } + if let Some(funded_chan) = chan.as_funded() { + for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop, + "HTLC already forwarded to the outbound edge", + &args.logger, + ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop, + &payment_hash, + ); } } } @@ -18749,65 +18674,20 @@ impl< let htlc_id = SentHTLCId::from_source(&htlc_source); match htlc_source { HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - - // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed - // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from - // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, - // we'll double-forward. - if reconstruct_manager_from_monitors { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC already forwarded to the outbound edge", - &&logger, - ); - prune_forwarded_htlc( - &mut already_forwarded_htlcs, - &prev_hop_data, - &htlc.payment_hash, - ); - } - - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs_legacy`, - // `pending_intercepted_htlcs_legacy`, or - // `decode_update_add_htlcs_legacy`, we were apparently not persisted - // after the monitor was when forwarding the payment. + // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the + // above loop, but we need to prune from those added HTLCs if they were already + // forwarded to the outbound edge. Otherwise, we'll double-forward. 
dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, + &mut decode_update_add_htlcs, &prev_hop_data, - "HTLC was forwarded to the closed channel", + "HTLC already forwarded to the outbound edge", &&logger, ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } - } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false - } else { true } - }); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop_data, + &htlc.payment_hash, + ); }, HTLCSource::OutboundRoute { payment_id, @@ -19214,55 +19094,41 @@ impl< } } - if reconstruct_manager_from_monitors { - // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. - // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. - for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { - if let HTLCSource::PreviousHopData(prev_hop_data) = src { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was failed backwards during manager read", - &args.logger, - ); - prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); - } + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); + prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); } + } - // See above comment on `failed_htlcs`. - for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } + // See above comment on `failed_htlcs`. 
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); } } - let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = - if reconstruct_manager_from_monitors { - (decode_update_add_htlcs, new_hash_map(), new_hash_map()) - } else { - ( - decode_update_add_htlcs_legacy, - forward_htlcs_legacy, - pending_intercepted_htlcs_legacy, - ) - }; - - // If we have a pending intercept HTLC present but no corresponding event, add that now rather - // than relying on the user having persisted the event prior to shutdown. - for (id, fwd) in pending_intercepted_htlcs.iter() { - if !pending_events_read.iter().any( - |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), - ) { - match create_htlc_intercepted_event(*id, fwd) { - Ok(ev) => pending_events_read.push_back((ev, None)), - Err(()) => debug_assert!(false), - } + // See above comment on `failed_htlcs`. + for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); } } @@ -19318,9 +19184,9 @@ impl< inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs), + pending_intercepted_htlcs: Mutex::new(new_hash_map()), - forward_htlcs: Mutex::new(forward_htlcs), + forward_htlcs: Mutex::new(new_hash_map()), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, diff --git a/pending_changelog/upgrade-0.2-pending-htlc.txt b/pending_changelog/upgrade-0.2-pending-htlc.txt new file mode 100644 index 00000000000..6ca11755ee9 --- /dev/null +++ b/pending_changelog/upgrade-0.2-pending-htlc.txt @@ -0,0 +1,9 @@ +Backwards Compatibility +======================= + + * Upgrading directly from 0.2 to 0.5 with pending HTLC forwards is not supported. + If you have pending HTLC forwards when upgrading from 0.2, you must first upgrade + to 0.3, forward the HTLCs (completing at least the outbound commitment), then + upgrade to 0.5. Attempting to upgrade directly from 0.2 to 0.5 with pending HTLC + forwards will result in a `DecodeError::InvalidValue` when reading the + `ChannelManager`. From f2c8f0e1aaa70990edb062d64b3e7bafc1af52ab Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 3 Feb 2026 17:08:15 -0500 Subject: [PATCH 13/21] Remove now-unused reconstruct_manager test arg See previous commit, but now in prod reconstruct_manager_from_monitors will always be set, so there's no need to have an option to set it in tests. 
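For illustration only, a sketch of what a reload-test call site looks like once the flag is gone (the node index and serialized-state variable names are placeholders, not taken from any particular test):

    // Sketch: reload_node! no longer takes a reconstruct argument; reconstructing
    // the pending HTLC set from Channel{Monitor} data is now the only code path.
    reload_node!(
        nodes[1],
        &chanman_serialized,
        &[&mon_serialized],
        persister,
        new_chain_monitor,
        nodes_1_deserialized
    );
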
--- lightning/src/ln/channelmanager.rs | 11 -------- lightning/src/ln/functional_test_utils.rs | 32 +++-------------------- lightning/src/ln/reload_tests.rs | 8 ++---- 3 files changed, 6 insertions(+), 45 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 4c4b49827f6..b2a8b5d3aa8 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17794,15 +17794,6 @@ pub struct ChannelManagerReadArgs< /// /// This is not exported to bindings users because we have no HashMap bindings pub channel_monitors: HashMap>, - - /// Whether the `ChannelManager` should attempt to reconstruct its set of pending HTLCs from - /// `Channel{Monitor}` data rather than its own persisted maps, which is planned to become - /// the default behavior in upcoming versions. - /// - /// If `None`, whether we reconstruct or use the legacy maps will be decided randomly during - /// `ChannelManager::from_channel_manager_data`. - #[cfg(test)] - pub reconstruct_manager_from_monitors: Option, } impl< @@ -17840,8 +17831,6 @@ impl< channel_monitors: hash_map_from_iter( channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)), ), - #[cfg(test)] - reconstruct_manager_from_monitors: None, } } } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index d3902b26201..8fbcfcd12e6 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -911,8 +911,6 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { tx_broadcaster: &broadcaster, logger: &self.logger, channel_monitors, - #[cfg(test)] - reconstruct_manager_from_monitors: None, }, ) .unwrap(); @@ -1311,7 +1309,7 @@ fn check_claimed_htlcs_match_route<'a, 'b, 'c>( pub fn _reload_node<'a, 'b, 'c>( node: &'a Node<'a, 'b, 'c>, config: UserConfig, chanman_encoded: &[u8], - monitors_encoded: &[&[u8]], _reconstruct_manager_from_monitors: Option, + monitors_encoded: &[&[u8]], ) -> TestChannelManager<'b, 'c> { let mut monitors_read = Vec::with_capacity(monitors_encoded.len()); for encoded in monitors_encoded { @@ -1345,8 +1343,6 @@ pub fn _reload_node<'a, 'b, 'c>( tx_broadcaster: node.tx_broadcaster, logger: node.logger, channel_monitors, - #[cfg(test)] - reconstruct_manager_from_monitors: _reconstruct_manager_from_monitors, }, ) .unwrap() @@ -1368,7 +1364,7 @@ pub fn _reload_node<'a, 'b, 'c>( #[macro_export] macro_rules! _reload_node_inner { ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: - ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr + ident, $new_chain_monitor: ident, $new_channelmanager: ident ) => { let chanman_encoded = $chanman_encoded; @@ -1388,7 +1384,6 @@ macro_rules! _reload_node_inner { $new_config, &chanman_encoded, $monitors_encoded, - $reconstruct_pending_htlcs, ); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); @@ -1408,8 +1403,7 @@ macro_rules! reload_node { $monitors_encoded, $persister, $new_chain_monitor, - $new_channelmanager, - None + $new_channelmanager ); }; // Reload the node with the new provided config @@ -1421,25 +1415,7 @@ macro_rules! reload_node { $monitors_encoded, $persister, $new_chain_monitor, - $new_channelmanager, - None - ); - }; - // Reload the node and have the `ChannelManager` use new codepaths that reconstruct its set of - // pending HTLCs from `Channel{Monitor}` data. 
- ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: - ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr - ) => { - let config = $node.node.get_current_config(); - $crate::_reload_node_inner!( - $node, - config, - $chanman_encoded, - $monitors_encoded, - $persister, - $new_chain_monitor, - $new_channelmanager, - $reconstruct_pending_htlcs + $new_channelmanager ); }; } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index d1e34cb7c71..89827c156ca 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -439,7 +439,6 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), - reconstruct_manager_from_monitors: None, }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -458,7 +457,6 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), - reconstruct_manager_from_monitors: None, }).unwrap(); nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); @@ -1953,8 +1951,7 @@ fn test_reload_node_with_preimage_in_monitor_claims_htlc() { &[&mon_0_1_serialized, &mon_1_2_serialized], persister, new_chain_monitor, - nodes_1_deserialized, - Some(true) + nodes_1_deserialized ); // When the claim is reconstructed during reload, a PaymentForwarded event is generated. @@ -2056,8 +2053,7 @@ fn test_reload_node_without_preimage_fails_htlc() { &[&mon_0_1_serialized, &mon_1_2_serialized], persister, new_chain_monitor, - nodes_1_deserialized, - Some(true) + nodes_1_deserialized ); // After reload, nodes[1] should have generated an HTLCHandlingFailed event. From c5395b88ad8d88a53bca1ac0085239c42617a680 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 13:29:12 -0500 Subject: [PATCH 14/21] Pass logger into FundedChannel::read Used in the next commit when we log on some read errors. 
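To make the new read signature concrete, a hedged sketch of a call site after this change (the tuple element names mirror the destructuring in the patch, but the surrounding bindings are illustrative):

    // Sketch: the ReadableArgs tuple for FundedChannel gains a logger reference
    // alongside the entropy source, signer provider, and supported channel type
    // features.
    let funded_channel = FundedChannel::read(
        &mut reader,
        (&entropy_source, &signer_provider, &logger, &our_supported_features),
    )?;
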
--- lightning/src/ln/channel.rs | 16 +++++++++------- lightning/src/ln/channelmanager.rs | 1 + 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 4a0d1175b8e..bdd6edd2273 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -15198,13 +15198,13 @@ impl Writeable for FundedChannel { } } -impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> - ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> for FundedChannel +impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> + ReadableArgs<(&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures)> for FundedChannel { fn read( - reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures), + reader: &mut R, args: (&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures), ) -> Result { - let (entropy_source, signer_provider, our_supported_features) = args; + let (entropy_source, signer_provider, logger, our_supported_features) = args; let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); if ver <= 2 { return Err(DecodeError::UnknownVersion); @@ -16954,9 +16954,11 @@ mod tests { let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); let features = channelmanager::provided_channel_type_features(&config); - let decoded_chan = - FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)) - .unwrap(); + let decoded_chan = FundedChannel::read( + &mut reader, + (&&keys_provider, &&keys_provider, &&logger, &features), + ) + .unwrap(); assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs); assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b2a8b5d3aa8..873d75296f5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17380,6 +17380,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> ( args.entropy_source, args.signer_provider, + args.logger, &provided_channel_type_features(&args.config), ), )?; From 4c3e14ba9012bd6e6c760fdd723fdc184cd67104 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 13:47:27 -0500 Subject: [PATCH 15/21] Remove deprecated InboundHTLCResolution::Resolved variant In 0.3, we added a new field to Channel as part of adding support for reconstructing the ChannelManager's pending forward HTLC set from Channel{Monitor} data, moving towards removing the requirement of regularly persisting the ChannelManager. This new field cannot be set for HTLCs received pre-0.1 that are using this legacy ::Resolved variant. In this version, we fully rely on the new Channel field being set and cease handling legacy HTLCs entirely, and thus must fully deprecate the ::Resolved variant and require all inbound HTLCs be in state InboundHTLCResolution::Pending, which we do here. Further cleanups are coming in upcoming commits. 
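As a condensed sketch of what this patch leaves behind (doc comments elided), only the Pending variant of the resolution enum survives; HTLCs serialized with the removed Resolved variant (only possible for HTLCs received on LDK 0.1 or earlier) now fail to deserialize, with an error logged pointing at the CHANGELOG upgrade note:

    // Sketch: the sole remaining variant, resolved once both sides have
    // revoke_and_ack'd the commitment that added the HTLC.
    enum InboundHTLCResolution {
        Pending { update_add_htlc: msgs::UpdateAddHTLC },
    }
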
--- lightning/src/ln/channel.rs | 96 ++++++++------------------------ lightning/src/ln/reload_tests.rs | 3 +- 2 files changed, 23 insertions(+), 76 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index bdd6edd2273..3e85628e7c7 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,9 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, - HTLCPreviousHopData, HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, - PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, - MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, + self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCPreviousHopData, + HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, RAACommitmentOrder, + SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -149,12 +148,6 @@ enum InboundHTLCRemovalReason { #[cfg_attr(test, derive(Debug))] #[derive(Clone)] enum InboundHTLCResolution { - /// Resolved implies the action we must take with the inbound HTLC has already been determined, - /// i.e., we already know whether it must be failed back or forwarded. - // - // TODO: Once this variant is removed, we should also clean up - // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable. - Resolved { pending_htlc_status: PendingHTLCStatus }, /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both /// nodes for the state update in which it was proposed. @@ -162,9 +155,7 @@ enum InboundHTLCResolution { } impl_writeable_tlv_based_enum!(InboundHTLCResolution, - (0, Resolved) => { - (0, pending_htlc_status, required), - }, + // 0 used to be used for InboundHTLCResolution::Resolved in 0.4 and below. (2, Pending) => { (0, update_add_htlc, required), }, @@ -301,7 +292,6 @@ impl InboundHTLCState { InboundHTLCResolution::Pending { update_add_htlc } => { update_add_htlc.hold_htlc.is_some() }, - InboundHTLCResolution::Resolved { .. } => false, }, InboundHTLCState::Committed { .. 
} | InboundHTLCState::LocalRemoved(_) => false, } @@ -8958,12 +8948,9 @@ where } log_trace!(logger, "Updating HTLCs on receipt of RAA..."); - let mut to_forward_infos = Vec::new(); let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); - let mut update_fail_htlcs = Vec::new(); - let mut update_fail_malformed_htlcs = Vec::new(); let mut static_invoices = Vec::new(); let mut require_commitment = false; let mut value_to_self_msat_diff: i64 = 0; @@ -9039,42 +9026,6 @@ where state { match resolution { - InboundHTLCResolution::Resolved { pending_htlc_status } => { - match pending_htlc_status { - PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailRelay( - msg.clone().into(), - ), - ); - update_fail_htlcs.push(msg) - }, - HTLCFailureMsg::Malformed(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailMalformed { - sha256_of_onion: msg.sha256_of_onion, - failure_code: msg.failure_code, - }, - ); - update_fail_malformed_htlcs.push(msg) - }, - } - }, - PendingHTLCStatus::Forward(forward_info) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); - to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed { - // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were - // received on LDK 0.1-. - update_add_htlc: InboundUpdateAdd::Legacy, - }; - }, - } - }, InboundHTLCResolution::Pending { update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); pending_update_adds.push(update_add_htlc.clone()); @@ -9200,7 +9151,7 @@ where false, true, false, - to_forward_infos, + Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9227,9 +9178,11 @@ where release_state_str ); if self.context.channel_state.can_generate_new_commitment() { - log_debug!(logger, "Responding with a commitment update with {} HTLCs failed for channel {}", - update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), - &self.context.channel_id); + log_debug!( + logger, + "Responding with a commitment update for channel {}", + &self.context.channel_id + ); } else { let reason = if self.context.channel_state.is_local_stfu_sent() { "exits quiescence" @@ -9245,7 +9198,7 @@ where false, true, false, - to_forward_infos, + Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9259,7 +9212,7 @@ where false, false, false, - to_forward_infos, + Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -15239,8 +15192,9 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> let counterparty_next_commitment_transaction_number = Readable::read(reader)?; let value_to_self_msat = Readable::read(reader)?; - let pending_inbound_htlc_count: u64 = Readable::read(reader)?; + let logger = WithContext::from(logger, None, Some(channel_id), None); + let pending_inbound_htlc_count: u64 = Readable::read(reader)?; let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min( pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize, @@ -15253,23 +15207,17 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, 
SP: SignerProvider, L: Logger> payment_hash: Readable::read(reader)?, state: match ::read(reader)? { 1 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; + let resolution = Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) }, 2 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; + let resolution = Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 89827c156ca..572f5120366 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -2200,8 +2200,7 @@ fn test_reload_with_mpp_claims_on_same_channel() { &[&mon_0_1_serialized, &mon_1_2_a_serialized, &mon_1_2_b_serialized], persister, new_chain_monitor, - nodes_1_deserialized, - Some(true) + nodes_1_deserialized ); // When the claims are reconstructed during reload, PaymentForwarded events are regenerated. From c700ac94db7d6a697f906e032b119cced3884ace Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 16:17:12 -0500 Subject: [PATCH 16/21] Simplify InboundHTLCStates' htlc resolution type Previously, several variants of InboundHTLCState contained an InboundHTLCResolution enum. Now that the resolution enum only has one variant, we can get rid of it and simplify the parent InboundHTLCState type. --- lightning/src/ln/channel.rs | 137 ++++++++++++++++++++---------------- 1 file changed, 75 insertions(+), 62 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3e85628e7c7..42734eb13cc 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -144,28 +144,11 @@ enum InboundHTLCRemovalReason { Fulfill { preimage: PaymentPreimage, attribution_data: Option }, } -/// Represents the resolution status of an inbound HTLC. -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] -enum InboundHTLCResolution { - /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed - /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both - /// nodes for the state update in which it was proposed. - Pending { update_add_htlc: msgs::UpdateAddHTLC }, -} - -impl_writeable_tlv_based_enum!(InboundHTLCResolution, - // 0 used to be used for InboundHTLCResolution::Resolved in 0.4 and below. - (2, Pending) => { - (0, update_add_htlc, required), - }, -); - #[cfg_attr(test, derive(Debug))] enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. - RemoteAnnounced(InboundHTLCResolution), + RemoteAnnounced(msgs::UpdateAddHTLC), /// Included in a received commitment_signed message (implying we've /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous /// state (see the example below). 
We have not yet included this HTLC in a @@ -195,13 +178,13 @@ enum InboundHTLCState { /// Implies AwaitingRemoteRevoke. /// /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md - AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution), + AwaitingRemoteRevokeToAnnounce(msgs::UpdateAddHTLC), /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it). /// We have also included this HTLC in our latest commitment_signed and are now just waiting /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. - AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), + AwaitingAnnouncedRemoteRevoke(msgs::UpdateAddHTLC), /// An HTLC irrevocably committed in the latest commitment transaction, ready to be forwarded or /// removed. Committed { @@ -286,12 +269,10 @@ impl InboundHTLCState { /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc fn should_hold_htlc(&self) -> bool { match self { - InboundHTLCState::RemoteAnnounced(res) - | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res) - | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res { - InboundHTLCResolution::Pending { update_add_htlc } => { - update_add_htlc.hold_htlc.is_some() - }, + InboundHTLCState::RemoteAnnounced(update_add_htlc) + | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc) + | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) => { + update_add_htlc.hold_htlc.is_some() }, InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false, } @@ -7897,9 +7878,7 @@ where amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending { - update_add_htlc: msg.clone(), - }), + state: InboundHTLCState::RemoteAnnounced(msg.clone()) }); Ok(()) } @@ -8503,11 +8482,10 @@ where } for htlc in self.context.pending_inbound_htlcs.iter_mut() { - if let &InboundHTLCState::RemoteAnnounced(ref htlc_resolution) = &htlc.state { + if let &InboundHTLCState::RemoteAnnounced(ref update_add) = &htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); - htlc.state = - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone()); + htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add.clone()); need_commitment = true; } } @@ -9018,24 +8996,22 @@ where InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }; mem::swap(&mut state, &mut htlc.state); - if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { + if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add) = state { log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash); - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution); + htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add); require_commitment = true; - } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = + } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) = state { - match resolution { - InboundHTLCResolution::Pending { update_add_htlc } => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to 
Committed", &htlc.payment_hash); - pending_update_adds.push(update_add_htlc.clone()); - htlc.state = InboundHTLCState::Committed { - update_add_htlc: InboundUpdateAdd::WithOnion { - update_add_htlc, - }, - }; - }, - } + log_trace!( + logger, + " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", + &htlc.payment_hash + ); + pending_update_adds.push(update_add_htlc.clone()); + htlc.state = InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + }; } } } @@ -12949,10 +12925,10 @@ where // is acceptable. for htlc in self.context.pending_inbound_htlcs.iter_mut() { let new_state = - if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = + if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add) = &htlc.state { - Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) + Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add.clone())) } else { None }; @@ -14662,6 +14638,41 @@ impl Readable for AnnouncementSigsState { } } +/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here, +/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s. +enum WriteableLegacyHTLCResolution<'a> { + Pending { update_add_htlc: &'a msgs::UpdateAddHTLC }, +} + +// We can't use `impl_writeable_tlv_based_enum` due to the lifetime. +impl<'a> Writeable for WriteableLegacyHTLCResolution<'a> { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + match self { + Self::Pending { update_add_htlc } => { + 2u8.write(writer)?; + crate::_encode_varint_length_prefixed_tlv!(writer, { + (0, update_add_htlc, required) + }); + }, + } + + Ok(()) + } +} + +/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here, +/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s. +enum ReadableLegacyHTLCResolution { + Pending { update_add_htlc: msgs::UpdateAddHTLC }, +} + +impl_writeable_tlv_based_enum!(ReadableLegacyHTLCResolution, + // 0 used to be used for the ::Resolved variant in 0.2 and below. + (2, Pending) => { + (0, update_add_htlc, required), + }, +); + impl Writeable for FundedChannel { fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been @@ -14746,13 +14757,13 @@ impl Writeable for FundedChannel { htlc.payment_hash.write(writer)?; match &htlc.state { &InboundHTLCState::RemoteAnnounced(_) => unreachable!(), - &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => { + &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add_htlc) => { 1u8.write(writer)?; - htlc_resolution.write(writer)?; + WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?; }, - &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { + &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref update_add_htlc) => { 2u8.write(writer)?; - htlc_resolution.write(writer)?; + WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?; }, &InboundHTLCState::Committed { ref update_add_htlc } => { 3u8.write(writer)?; @@ -15207,18 +15218,20 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> payment_hash: Readable::read(reader)?, state: match ::read(reader)? { 1 => { - let resolution = Readable::read(reader).map_err(|e| { - log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. 
HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); - e - })?; - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) + let ReadableLegacyHTLCResolution::Pending { update_add_htlc } = + Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc) }, 2 => { - let resolution = Readable::read(reader).map_err(|e| { - log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); - e - })?; - InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) + let ReadableLegacyHTLCResolution::Pending { update_add_htlc } = + Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) }, 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, 4 => { From 620f21147381d51342d79eacfdcfbeea21c8bfdb Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 28 Jan 2026 16:47:26 -0500 Subject: [PATCH 17/21] Remove now-unused InboundUpdateAdd::Legacy This version of LDK no longer supports HTLCs created before 0.3, which allows us to remove this variant that will only be present if an HTLC was received on a pre-0.3 LDK version. See the past few commits. --- lightning/src/ln/channel.rs | 48 ++++++++++++++++-------------- lightning/src/ln/channelmanager.rs | 4 --- 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 42734eb13cc..7f877f576a6 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -327,16 +327,13 @@ enum InboundUpdateAdd { blinded_failure: Option, outbound_hop: OutboundHop, }, - /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound - /// committed HTLCs. - Legacy, } impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, (0, WithOnion) => { (0, update_add_htlc, required), }, - (2, Legacy) => {}, + // 2 was previously used for the ::Legacy variant, used for HTLCs received on LDK 0.1-. (4, Forwarded) => { (0, incoming_packet_shared_secret, required), (2, outbound_hop, required), @@ -7883,17 +7880,6 @@ where Ok(()) } - /// Returns true if any committed inbound HTLCs were received pre-LDK 0.3 and cannot be used - /// during `ChannelManager` deserialization to reconstruct the set of pending HTLCs. - pub(super) fn has_legacy_inbound_htlcs(&self) -> bool { - self.context.pending_inbound_htlcs.iter().any(|htlc| { - matches!( - &htlc.state, - InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy } - ) - }) - } - /// Returns committed inbound HTLCs whose onion has not yet been decoded and processed. Useful /// for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. 
pub(super) fn inbound_htlcs_pending_decode( @@ -8993,7 +8979,10 @@ where }; if swap { let mut state = - InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }; + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed { + sha256_of_onion: [0; 32], + failure_code: 0, + }); mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add) = state { @@ -15233,7 +15222,7 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> })?; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) }, - 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, + 3 => InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, 4 => { let reason = match ::read(reader)? { 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -16084,6 +16073,23 @@ pub(crate) fn hold_time_since(send_timestamp: Option) -> Option { }) } +fn dummy_inbound_update_add() -> InboundUpdateAdd { + let txid_hash = Sha256d::from_bytes_ref(&[0; 32]); + InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret: [0; 32], + phantom_shared_secret: None, + trampoline_shared_secret: None, + blinded_failure: None, + outbound_hop: OutboundHop { + amt_msat: 0, + channel_id: ChannelId([0; 32]), + funding_txo: OutPoint { txid: Txid::from_raw_hash(*txid_hash), index: 0 }, + node_id: PublicKey::from_slice(&[2; 33]).unwrap(), + user_channel_id: 0, + }, + } +} + #[cfg(test)] mod tests { use crate::chain::chaininterface::LowerBoundedFeeEstimator; @@ -16091,8 +16097,8 @@ mod tests { use crate::chain::BestBlock; use crate::ln::chan_utils::{self, commit_tx_fee_sat, ChannelTransactionParameters}; use crate::ln::channel::{ - AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, HTLCInitiator, - HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundUpdateAdd, + dummy_inbound_update_add, AwaitingChannelReadyFlags, ChannelState, FundedChannel, + HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, }; use crate::ln::channel::{ @@ -16136,10 +16142,6 @@ mod tests { use bitcoin::{ScriptBuf, WPubkeyHash, WitnessProgram, WitnessVersion}; use std::cmp; - fn dummy_inbound_update_add() -> InboundUpdateAdd { - InboundUpdateAdd::Legacy - } - #[test] #[rustfmt::skip] fn test_channel_state_order() { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 873d75296f5..d566f7b39c4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18568,10 +18568,6 @@ impl< is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { - // Legacy HTLCs are from pre-LDK 0.3 and cannot be reconstructed. - if funded_chan.has_legacy_inbound_htlcs() { - return Err(DecodeError::InvalidValue); - } // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized // `Channel` as part of removing the requirement to regularly persist the // `ChannelManager`. 
From 25ab31d319bc40a99f1002892165d6801789887b Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 15:19:33 -0500 Subject: [PATCH 18/21] Remove now-unused param from monitor_updating_paused In the previous commit, we removed the InboundHTLCResolution::Resolved enum variant, which caused us to never provide any HTLCs in this now-removed parameter. --- lightning/src/ln/channel.rs | 79 ++++--------------------------------- 1 file changed, 7 insertions(+), 72 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 7f877f576a6..b80c7fe1f3d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7615,7 +7615,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } @@ -8153,15 +8152,7 @@ where &self.context.channel_id() ); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context .interactive_tx_signing_session .as_mut() @@ -8265,15 +8256,7 @@ where .as_mut() .expect("Signing session must exist for negotiated pending splice") .received_commitment_signed(); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context.monitor_pending_tx_signatures = true; Ok(self.push_ret_blockable_mon_update(monitor_update)) @@ -8573,7 +8556,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); @@ -8773,15 +8755,7 @@ where if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); - self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -9116,7 +9090,6 @@ where false, true, false, - Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9163,7 +9136,6 @@ where false, true, false, - Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9177,7 +9149,6 @@ where false, false, false, - Vec::new(), revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9487,7 +9458,6 @@ where /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L, ) { @@ -9496,7 +9466,6 @@ where self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; self.context.monitor_pending_channel_ready |= resend_channel_ready; - self.context.monitor_pending_forwards.extend(pending_forwards); self.context.monitor_pending_failures.extend(pending_fails); self.context.monitor_pending_finalized_fulfills.extend(pending_finalized_claimed_htlcs); self.context.channel_state.set_monitor_update_in_progress(); @@ -10712,15 +10681,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, 
- false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -11461,15 +11422,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); let announcement_sigs = @@ -13138,15 +13091,7 @@ where let can_add_htlc = send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))?; if can_add_htlc { let monitor_update = self.build_commitment_no_status_check(logger); - self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); Ok(self.push_ret_blockable_mon_update(monitor_update)) } else { Ok(None) @@ -13266,15 +13211,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - &&logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), &&logger); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -13883,7 +13820,6 @@ impl OutboundV1Channel { need_channel_ready, Vec::new(), Vec::new(), - Vec::new(), logger, ); Ok((channel, channel_monitor)) @@ -14182,7 +14118,6 @@ impl InboundV1Channel { need_channel_ready, Vec::new(), Vec::new(), - Vec::new(), logger, ); From fe90cd85b1a8cdf7691f0e8ebf5ac60848f9456c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 16:31:30 -0500 Subject: [PATCH 19/21] Delete now-unused ChannelContext::monitor_pending_forwards We stopped adding any HTLCs to this vec a few commits ago when we removed support for HTLCs that were originally received on LDK 0.1. --- lightning/src/ln/channel.rs | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b80c7fe1f3d..c7ee90d8d8d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1179,6 +1179,7 @@ pub(super) struct MonitorRestoreUpdates { /// A `CommitmentUpdate` to be sent to our channel peer. pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, + // TODO: get rid of this pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, @@ -3089,7 +3090,6 @@ pub(super) struct ChannelContext { // responsible for some of the HTLCs here or not - we don't know whether the update in question // completed or not. We currently ignore these fields entirely when force-closing a channel, // but need to handle this somehow or we run the risk of losing HTLCs! 
- monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, monitor_pending_finalized_fulfills: Vec<(HTLCSource, Option)>, monitor_pending_update_adds: Vec, @@ -3793,7 +3793,6 @@ impl ChannelContext { monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), monitor_pending_update_adds: Vec::new(), @@ -4027,7 +4026,6 @@ impl ChannelContext { monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), monitor_pending_update_adds: Vec::new(), @@ -9552,8 +9550,6 @@ where let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); - let mut accepted_htlcs = Vec::new(); - mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); let mut failed_htlcs = Vec::new(); mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); let mut finalized_claimed_htlcs = Vec::new(); @@ -9574,7 +9570,7 @@ where self.context.monitor_pending_commitment_signed = false; return MonitorRestoreUpdates { raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, + accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, channel_ready_order, committed_outbound_htlc_sources }; @@ -9605,7 +9601,7 @@ where if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { - raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, + raa, commitment_update, commitment_order, accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, channel_ready_order, committed_outbound_htlc_sources } @@ -14863,11 +14859,8 @@ impl Writeable for FundedChannel { self.context.monitor_pending_revoke_and_ack.write(writer)?; self.context.monitor_pending_commitment_signed.write(writer)?; - (self.context.monitor_pending_forwards.len() as u64).write(writer)?; - for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() { - pending_forward.write(writer)?; - htlc_id.write(writer)?; - } + // Previously used for monitor_pending_forwards prior to LDK 0.5. 
+ 0u64.write(writer)?; (self.context.monitor_pending_failures.len() as u64).write(writer)?; for &(ref htlc_source, ref payment_hash, ref fail_reason) in @@ -15289,13 +15282,10 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> let monitor_pending_revoke_and_ack = Readable::read(reader)?; let monitor_pending_commitment_signed = Readable::read(reader)?; - let monitor_pending_forwards_count: u64 = Readable::read(reader)?; - let mut monitor_pending_forwards = Vec::with_capacity(cmp::min( - monitor_pending_forwards_count as usize, - DEFAULT_MAX_HTLCS as usize, - )); - for _ in 0..monitor_pending_forwards_count { - monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?)); + let monitor_pending_forwards_count_legacy: u64 = Readable::read(reader)?; + if monitor_pending_forwards_count_legacy != 0 { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + return Err(DecodeError::InvalidValue); } let monitor_pending_failures_count: u64 = Readable::read(reader)?; @@ -15899,7 +15889,6 @@ impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> monitor_pending_channel_ready, monitor_pending_revoke_and_ack, monitor_pending_commitment_signed, - monitor_pending_forwards, monitor_pending_failures, monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or_default(), From bf32829b988d8d565d63866852161fe431507ffc Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 16:40:27 -0500 Subject: [PATCH 20/21] Delete now-unused MonitorRestoreUpdates::accepted_htlcs We stopped adding any HTLCs to this vec a few commits ago when we removed support for HTLCs that were originally received on LDK 0.1 --- lightning/src/ln/channel.rs | 14 +++++------ lightning/src/ln/channelmanager.rs | 37 ++++++++---------------------- 2 files changed, 15 insertions(+), 36 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c7ee90d8d8d..ccceef01796 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -51,8 +51,8 @@ use crate::ln::channel_state::{ }; use crate::ln::channelmanager::{ self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCPreviousHopData, - HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, RAACommitmentOrder, - SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, + HTLCSource, OpenChannelMessage, PaymentClaimDetails, RAACommitmentOrder, SentHTLCId, + BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -1179,8 +1179,6 @@ pub(super) struct MonitorRestoreUpdates { /// A `CommitmentUpdate` to be sent to our channel peer. 
pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, - // TODO: get rid of this - pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, /// Inbound update_adds that are now irrevocably committed to this channel and are ready for the @@ -9570,9 +9568,9 @@ where self.context.monitor_pending_commitment_signed = false; return MonitorRestoreUpdates { raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, pending_update_adds, - funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, committed_outbound_htlc_sources + failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, + channel_ready, announcement_sigs, tx_signatures: None, channel_ready_order, + committed_outbound_htlc_sources }; } @@ -9601,7 +9599,7 @@ where if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { - raa, commitment_update, commitment_order, accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, + raa, commitment_update, commitment_order, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, channel_ready_order, committed_outbound_htlc_sources } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d566f7b39c4..6fa43120523 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1406,7 +1406,6 @@ enum PostMonitorUpdateChanResume { user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, - htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, @@ -9587,7 +9586,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ &self, channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, - htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, @@ -9648,9 +9646,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.handle_monitor_update_completion_actions(update_actions); - if let Some(forwards) = htlc_forwards { - self.forward_htlcs(&mut [forwards][..]); - } if let Some(decode) = decode_update_add_htlcs { self.push_decode_update_add_htlcs(decode); } @@ -10112,13 +10107,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ None }; - let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + let decode_update_add_htlcs = self.handle_channel_resumption( pending_msg_events, chan, updates.raa, updates.commitment_update, updates.commitment_order, - updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, @@ -10141,7 +10135,6 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ user_channel_id: chan.context.get_user_id(), unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs: updates.finalized_claimed_htlcs, failed_htlcs: updates.failed_htlcs, @@ -10243,7 +10236,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ user_channel_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, @@ -10256,7 +10248,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ user_channel_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, @@ -10272,17 +10263,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, channel: &mut FundedChannel, raa: Option, commitment_update: Option, commitment_order: RAACommitmentOrder, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec, - funding_broadcastable: Option, + pending_update_adds: Vec, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option, tx_signatures: Option, tx_abort: Option, channel_ready_order: ChannelReadyOrder, - ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { + ) -> Option<(u64, Vec)> { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", + log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, - pending_forwards.len(), pending_update_adds.len(), + pending_update_adds.len(), if funding_broadcastable.is_some() { "" } else { "not " }, if channel_ready.is_some() { "sending" } else { "without" }, if announcement_sigs.is_some() { "sending" } else { "without" }, @@ -10293,14 +10283,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let counterparty_node_id = channel.context.get_counterparty_node_id(); let outbound_scid_alias = channel.context.outbound_scid_alias(); - let mut htlc_forwards = None; - if !pending_forwards.is_empty() { - htlc_forwards = Some(( - outbound_scid_alias, channel.context.get_counterparty_node_id(), - channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(), - channel.context.get_user_id(), pending_forwards - )); - } let mut decode_update_add_htlcs = None; if !pending_update_adds.is_empty() { decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds)); @@ -10387,7 +10369,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, None => { debug_assert!(false, "Channel resumed without a funding txo, this should never happen!"); - return (htlc_forwards, decode_update_add_htlcs); + return decode_update_add_htlcs; } }; } else { @@ -10405,7 +10387,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ emit_initial_channel_ready_event!(pending_events, channel); } - (htlc_forwards, decode_update_add_htlcs) + decode_update_add_htlcs } #[rustfmt::skip] @@ -12454,12 +12436,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take(); - let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + let decode_update_add_htlcs = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.commitment_order, - Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, + Vec::new(), None, responses.channel_ready, responses.announcement_sigs, responses.tx_signatures, responses.tx_abort, responses.channel_ready_order, ); - debug_assert!(htlc_forwards.is_none()); debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); From eb547f21008095e907ecd43f6f94e705e0bba334 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 7 Jan 2026 16:50:37 -0500 Subject: [PATCH 21/21] Delete now-unused PendingHTLCStatus We stopped using this struct a few commits ago when we removed support for HTLCs that were originally received on LDK 0.1. --- lightning/src/ln/channelmanager.rs | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 6fa43120523..58b0589af15 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -188,23 +188,6 @@ use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use core::time::Duration; use core::{cmp, mem}; -// We hold various information about HTLC relay in the HTLC objects in Channel itself: -// -// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should -// forward the HTLC with information it will give back to us when it does so, or if it should Fail -// the HTLC with the relevant message for the Channel to handle giving to the remote peer. -// -// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the -// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo -// with it to track where it came from (in case of onwards-forward error), waiting a random delay -// before we forward it. -// -// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a -// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use -// to either fail-backwards or fulfill the HTLC backwards along the relevant path). -// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is -// our payment, which we can use to decode errors or inform the user that the payment was sent. - /// Information about where a received HTLC('s onion) has indicated the HTLC should go. 
#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(Debug, PartialEq))] @@ -437,14 +420,6 @@ pub(super) enum HTLCFailureMsg { Malformed(msgs::UpdateFailMalformedHTLC), } -/// Stores whether we can't forward an HTLC or relevant forwarding info -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug -pub(super) enum PendingHTLCStatus { - Forward(PendingHTLCInfo), - Fail(HTLCFailureMsg), -} - #[cfg_attr(test, derive(Clone, Debug, PartialEq))] pub(super) struct PendingAddHTLCInfo { pub(super) forward_info: PendingHTLCInfo, @@ -16735,11 +16710,6 @@ impl Readable for HTLCFailureMsg { } } -impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ; - (0, Forward), - (1, Fail), -); - impl_writeable_tlv_based_enum!(BlindedFailure, (0, FromIntroductionNode) => {}, (2, FromBlindedNode) => {},