From 7c97f3c08738851e66d3daa229a4a31de8656817 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 16 Dec 2025 10:55:14 -0500 Subject: [PATCH 01/14] Remove unnecessary update_add clone on Channel ser --- lightning/src/ln/channel.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 57f10207e17..306d3c9f6ef 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -14713,7 +14713,7 @@ where } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); - let mut inbound_committed_update_adds: Vec> = Vec::new(); + let mut inbound_committed_update_adds: Vec<&Option> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14735,7 +14735,7 @@ where }, &InboundHTLCState::Committed { ref update_add_htlc_opt } => { 3u8.write(writer)?; - inbound_committed_update_adds.push(update_add_htlc_opt.clone()); + inbound_committed_update_adds.push(update_add_htlc_opt); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; From 8204e0aba0588341ac77661ad19113c2b6866d57 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 5 Jan 2026 14:04:19 -0500 Subject: [PATCH 02/14] Move is_chan_closed check into loop Necessary for the next commit and makes it easier to read. --- lightning/src/ln/channelmanager.rs | 285 +++++++++++++++-------------- 1 file changed, 147 insertions(+), 138 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0b6cc07738a..b774467215b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18005,156 +18005,165 @@ where is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); } - if is_channel_closed { - for (htlc_source, (htlc, preimage_opt)) in - monitor.get_all_current_outbound_htlcs() - { - let logger = WithChannelMonitor::from( - &args.logger, - monitor, - Some(htlc.payment_hash), - ); - let htlc_id = SentHTLCId::from_source(&htlc_source); - match htlc_source { - HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs`, - // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not - // persisted after the monitor was when forwarding the payment. 
- dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } + for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() + { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); + let htlc_id = SentHTLCId::from_source(&htlc_source); + match htlc_source { + HTLCSource::PreviousHopData(prev_hop_data) => { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id + }; + if !is_channel_closed { + continue; + } + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs`, + // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not + // persisted after the monitor was when forwarding the payment. + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs_legacy, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); + forward_htlcs_legacy.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + false } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false } else { true } }); - }, - HTLCSource::OutboundRoute { - payment_id, - session_priv, - path, - bolt12_invoice, - .. 
- } => { - if let Some(preimage) = preimage_opt { - let pending_events = Mutex::new(pending_events_read); - let update = PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id, - }; - let mut compl_action = Some( - EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) - ); - pending_outbounds.claim_htlc( - payment_id, - preimage, - bolt12_invoice, - session_priv, - path, - true, - &mut compl_action, - &pending_events, - ); - // If the completion action was not consumed, then there was no - // payment to claim, and we need to tell the `ChannelMonitor` - // we don't need to hear about the HTLC again, at least as long - // as the PaymentSent event isn't still sitting around in our - // event queue. - let have_action = if compl_action.is_some() { - let pending_events = pending_events.lock().unwrap(); - pending_events.iter().any(|(_, act)| *act == compl_action) - } else { - false - }; - if !have_action && compl_action.is_some() { - let mut peer_state = per_peer_state - .get(&counterparty_node_id) - .map(|state| state.lock().unwrap()) - .expect("Channels originating a preimage must have peer state"); - let update_id = peer_state - .closed_channel_monitor_update_ids - .get_mut(channel_id) - .expect("Channels originating a preimage must have a monitor"); - // Note that for channels closed pre-0.1, the latest - // update_id is `u64::MAX`. - *update_id = update_id.saturating_add(1); - - pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: monitor.get_counterparty_node_id(), + !forwards.is_empty() + }); + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + pending_events_read.retain(|(event, _)| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { + intercepted_id != ev_id + } else { true } + }); + false + } else { true } + }); + }, + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. + } => { + if !is_channel_closed { + continue; + } + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + let update = PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id, + }; + let mut compl_action = Some( + EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) + ); + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + true, + &mut compl_action, + &pending_events, + ); + // If the completion action was not consumed, then there was no + // payment to claim, and we need to tell the `ChannelMonitor` + // we don't need to hear about the HTLC again, at least as long + // as the PaymentSent event isn't still sitting around in our + // event queue. 
+ let have_action = if compl_action.is_some() { + let pending_events = pending_events.lock().unwrap(); + pending_events.iter().any(|(_, act)| *act == compl_action) + } else { + false + }; + if !have_action && compl_action.is_some() { + let mut peer_state = per_peer_state + .get(&counterparty_node_id) + .map(|state| state.lock().unwrap()) + .expect( + "Channels originating a preimage must have peer state", + ); + let update_id = peer_state + .closed_channel_monitor_update_ids + .get_mut(channel_id) + .expect( + "Channels originating a preimage must have a monitor", + ); + // Note that for channels closed pre-0.1, the latest + // update_id is `u64::MAX`. + *update_id = update_id.saturating_add(1); + + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor + .get_counterparty_node_id(), funding_txo: monitor.get_funding_txo(), channel_id: monitor.channel_id(), update: ChannelMonitorUpdate { update_id: *update_id, channel_id: Some(monitor.channel_id()), - updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { - htlc: htlc_id, - }], + updates: vec![ + ChannelMonitorUpdateStep::ReleasePaymentComplete { + htlc: htlc_id, + }, + ], }, - }); - } - pending_events_read = pending_events.into_inner().unwrap(); + }, + ); } - }, - } + pending_events_read = pending_events.into_inner().unwrap(); + } + }, } - for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { - log_info!( - args.logger, - "Failing HTLC with payment hash {} as it was resolved on-chain.", - payment_hash - ); - let completion_action = Some(PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id: SentHTLCId::from_source(&htlc_source), - }); + } + for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + log_info!( + args.logger, + "Failing HTLC with payment hash {} as it was resolved on-chain.", + payment_hash + ); + let completion_action = Some(PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id: SentHTLCId::from_source(&htlc_source), + }); - failed_htlcs.push(( - htlc_source, - payment_hash, - monitor.get_counterparty_node_id(), - monitor.channel_id(), - LocalHTLCFailureReason::OnChainTimeout, - completion_action, - )); - } + failed_htlcs.push(( + htlc_source, + payment_hash, + monitor.get_counterparty_node_id(), + monitor.channel_id(), + LocalHTLCFailureReason::OnChainTimeout, + completion_action, + )); } // Whether the downstream channel was closed or not, try to re-apply any payment From f0317b8a6d09936d29381e16ddd39fa48197e731 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 12 Dec 2025 16:41:55 -0500 Subject: [PATCH 03/14] Don't double-forward HTLCs in rebuilt update_adds map We recently began reconstructing ChannelManager::decode_update_add_htlcs on startup, using data present in the Channels. However, we failed to prune HTLCs from this rebuilt map if a given HTLC was already forwarded to the outbound edge (we pruned correctly if the outbound edge was a closed channel, but not otherwise). Here we fix this bug that would have caused us to double-forward inbound HTLC forwards. 
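
To illustrate the pruning rule this commit adds, here is a minimal sketch with
hypothetical, simplified types (the real code matches `HTLCPreviousHopData`
entries against `decode_update_add_htlcs`, keyed by the inbound edge's outbound
SCID alias and the HTLC id, rather than the bare integers used here):

    use std::collections::HashMap;

    struct PrevHop {
        prev_outbound_scid_alias: u64,
        htlc_id: u64,
    }

    /// Drops any rebuilt to-decode HTLC that a committed outbound HTLC already
    /// references, since decoding it again would forward it a second time.
    fn prune_already_forwarded(
        decode_update_add_htlcs: &mut HashMap<u64, Vec<u64>>,
        committed_outbound_sources: &[PrevHop],
    ) {
        for prev_hop in committed_outbound_sources {
            if let Some(htlc_ids) =
                decode_update_add_htlcs.get_mut(&prev_hop.prev_outbound_scid_alias)
            {
                htlc_ids.retain(|id| *id != prev_hop.htlc_id);
            }
        }
        // Keep the map free of empty entries, as the real code does.
        decode_update_add_htlcs.retain(|_, htlc_ids| !htlc_ids.is_empty());
    }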
--- lightning/src/ln/channelmanager.rs | 16 +++++---- lightning/src/ln/reload_tests.rs | 58 ++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b774467215b..e99f7934039 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18016,6 +18016,16 @@ where info.prev_funding_outpoint == prev_hop_data.outpoint && info.prev_htlc_id == prev_hop_data.htlc_id }; + // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the above + // loop, but we need to prune from those added HTLCs if they were already forwarded to + // the outbound edge. Otherwise, we'll double-forward. + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); + if !is_channel_closed { continue; } @@ -18024,12 +18034,6 @@ where // still have an entry for this HTLC in `forward_htlcs`, // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not // persisted after the monitor was when forwarding the payment. - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs_legacy, &prev_hop_data, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index d143082821d..a38262e6952 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1258,6 +1258,64 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } +#[test] +fn test_manager_persisted_post_outbound_edge_forward() { + // Test that we will not double-forward an HTLC after restart if it has already been forwarded to + // the outbound edge, which was previously broken. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Lock in the HTLC from node_a <> node_b. + let amt_msat = 5000; + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Add the HTLC to the outbound edge, node_b <> node_c. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + + // Disconnect peers and reload the forwarding node_b. 
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
+
+	let node_b_encoded = nodes[1].node.encode();
+	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+	reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+	reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0]));
+	let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]);
+	args_b_c.send_channel_ready = (true, true);
+	args_b_c.send_announcement_sigs = (true, true);
+	args_b_c.pending_htlc_adds = (0, 1);
+	// While reconnecting, we re-send node_b's outbound update_add and commit the HTLC to the b<>c
+	// channel.
+	reconnect_nodes(args_b_c);
+
+	// Ensure node_b won't double-forward the outbound HTLC (this was previously broken).
+	nodes[1].node.process_pending_htlc_forwards();
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Claim the HTLC backwards to node_a.
+	expect_and_process_pending_htlcs(&nodes[2], false);
+	expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
+	let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]];
+	do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage));
+	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
 #[test]
 fn test_reload_partial_funding_batch() {
 	let chanmon_cfgs = create_chanmon_cfgs(3);

From 31b8d544f1e99e5b049b7e89cc0a775fd9caa4b4 Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Tue, 16 Dec 2025 11:09:08 -0500
Subject: [PATCH 04/14] Optimize dedup_decode_update_add_htlcs

No need to iterate through all entries in the map; we can instead pull
out the specific entry we want.
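
For illustration, the shape of this optimization over a simplified map (a
minimal sketch with hypothetical types; the real map is keyed by the inbound
edge's SCID alias and holds `UpdateAddHTLC`s rather than bare ids):

    use std::collections::{hash_map::Entry, HashMap};

    fn remove_htlc(map: &mut HashMap<u64, Vec<u64>>, scid_alias: u64, htlc_id: u64) {
        // Jump straight to the single entry that can contain the HTLC instead of
        // scanning every entry in the map with `retain`.
        if let Entry::Occupied(mut entry) = map.entry(scid_alias) {
            entry.get_mut().retain(|id| *id != htlc_id);
            // Preserve the old invariant that entries with no remaining HTLCs are
            // removed from the map entirely.
            if entry.get().is_empty() {
                entry.remove();
            }
        }
    }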
--- lightning/src/ln/channelmanager.rs | 46 ++++++++++++++++-------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e99f7934039..ce44071d8da 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17106,28 +17106,32 @@ fn dedup_decode_update_add_htlcs( ) where L::Target: Logger, { - decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| { - update_add_htlcs.retain(|update_add| { - let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias - && update_add.htlc_id == prev_hop_data.htlc_id; - if matches { - let logger = WithContext::from( - logger, - prev_hop_data.counterparty_node_id, - Some(update_add.channel_id), - Some(update_add.payment_hash), - ); - log_info!( - logger, - "Removing pending to-decode HTLC with id {}: {}", - update_add.htlc_id, - removal_reason - ); + match decode_update_add_htlcs.entry(prev_hop_data.prev_outbound_scid_alias) { + hash_map::Entry::Occupied(mut update_add_htlcs) => { + update_add_htlcs.get_mut().retain(|update_add| { + let matches = update_add.htlc_id == prev_hop_data.htlc_id; + if matches { + let logger = WithContext::from( + logger, + prev_hop_data.counterparty_node_id, + Some(update_add.channel_id), + Some(update_add.payment_hash), + ); + log_info!( + logger, + "Removing pending to-decode HTLC with id {}: {}", + update_add.htlc_id, + removal_reason + ); + } + !matches + }); + if update_add_htlcs.get().is_empty() { + update_add_htlcs.remove(); } - !matches - }); - !update_add_htlcs.is_empty() - }); + }, + _ => {}, + } } // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the From f245139dfb86185f7bab6c2fcda6a6032708132c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 16 Dec 2025 16:52:58 -0500 Subject: [PATCH 05/14] Prefer legacy forward maps on manager read We are working on removing the requirement of regularly persisting the ChannelManager, and as a result began reconstructing the manager's forwards maps from Channel data on startup in a recent PR, see cb398f6b761edde6b45fcda93a01c564cb49a13c and parent commits. At the time, we implemented ChannelManager::read to prefer to use the newly reconstructed maps, partly to ensure we have test coverage of the new maps' usage. This resulted in a lot of code that would deduplicate HTLCs that were present in the old maps to avoid redundant HTLC handling/duplicate forwards, adding extra complexity. Instead, always use the old maps in prod, but randomly use the newly reconstructed maps in testing, to exercise the new codepaths (see reconstruct_manager_from_monitors in ChannelManager::read). --- CONTRIBUTING.md | 6 + lightning/src/ln/channelmanager.rs | 281 +++++++++++------------------ 2 files changed, 113 insertions(+), 174 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d837c873efa..ad25fb10558 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -192,6 +192,12 @@ welcomed. * `LDK_TEST_DETERMINISTIC_HASHES` - When set to `1`, uses deterministic hash map iteration order in tests. This ensures consistent test output across runs, useful for comparing logs before and after changes. +* `LDK_TEST_REBUILD_MGR_FROM_MONITORS` - If set to `1`, on test node reload the `ChannelManager`'s + HTLC set will be reconstructed from `Channel{Monitor}` persisted data. If `0`, test nodes will be + reloaded from persisted `ChannelManager` data using legacy code paths. 
Setting either value ensures consistent
+  test output across runs, which is useful for comparing logs before and after changes, since
+  otherwise the codepath used on reload is chosen randomly.
+
 C/C++ Bindings
 --------------
 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index ce44071d8da..2cf522b987a 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -11747,6 +11747,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		if !new_intercept_events.is_empty() {
 			let mut events = self.pending_events.lock().unwrap();
+			// It's possible we processed this intercept forward, generated an event, then re-processed
+			// it here after restart, in which case the intercept event should not be pushed
+			// redundantly.
+			new_intercept_events.retain(|ev| !events.contains(ev));
 			events.append(&mut new_intercept_events);
 		}
 	}
@@ -17484,9 +17488,9 @@ where
 		const MAX_ALLOC_SIZE: usize = 1024 * 64;
 		let forward_htlcs_count: u64 = Readable::read(reader)?;
-		// This map is read but may no longer be used because we'll attempt to rebuild the set of HTLC
-		// forwards from the `Channel{Monitor}`s instead, as a step towards removing the requirement of
-		// regularly persisting the `ChannelManager`.
+		// Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of
+		// regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from
+		// `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` usage below.
 		let mut forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>> =
 			hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
 		for _ in 0..forward_htlcs_count {
@@ -17587,9 +17591,9 @@ where
 			};
 		}
 
-		// Some maps are read but may no longer be used because we attempt to rebuild the pending HTLC
-		// set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of
-		// regularly persisting the `ChannelManager`.
+		// Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of
+		// regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from
+		// `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` below.
 		let mut pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>> =
 			None;
 		let mut decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> =
@@ -17930,6 +17934,36 @@ where
 			pending_background_events.push(new_event);
 		}
 
+		// In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and
+		// persist that state, relying on it being up-to-date on restart. Newer versions are moving
+		// towards reducing this reliance on regular persistence of the `ChannelManager`, and instead
+		// reconstruct HTLC/payment state based on `Channel{Monitor}` data if
+		// `reconstruct_manager_from_monitors` is set below. Currently it is only set in tests, randomly
+		// to ensure the legacy codepaths also have test coverage.
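+		// (For illustration: in a test run the selection can be pinned via the
+		// `LDK_TEST_REBUILD_MGR_FROM_MONITORS` environment variable, e.g.
+		// `LDK_TEST_REBUILD_MGR_FROM_MONITORS=1 cargo test`, rather than relying on the
+		// random choice below; see CONTRIBUTING.md.)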
+ #[cfg(not(test))] + let reconstruct_manager_from_monitors = false; + #[cfg(test)] + let reconstruct_manager_from_monitors = { + use core::hash::{BuildHasher, Hasher}; + + match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { + Ok(val) => match val.as_str() { + "1" => true, + "0" => false, + _ => panic!("LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", val), + }, + Err(_) => { + let rand_val = + std::collections::hash_map::RandomState::new().build_hasher().finish(); + if rand_val % 2 == 0 { + true + } else { + false + } + }, + } + }; + // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. We put them here and do so after we // have a fully-constructed `ChannelManager` at the end. @@ -17954,18 +17988,20 @@ where let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { - if let Some(funded_chan) = chan.as_funded() { - let inbound_committed_update_adds = - funded_chan.get_inbound_committed_update_adds(); - if !inbound_committed_update_adds.is_empty() { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel`, as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs.insert( - funded_chan.context.outbound_scid_alias(), - inbound_committed_update_adds, - ); + if reconstruct_manager_from_monitors { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + let inbound_committed_update_adds = + funded_chan.get_inbound_committed_update_adds(); + if !inbound_committed_update_adds.is_empty() { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel`, as part of removing the requirement to regularly persist the + // `ChannelManager`. + decode_update_add_htlcs.insert( + funded_chan.context.outbound_scid_alias(), + inbound_committed_update_adds, + ); + } } } } @@ -18020,17 +18056,20 @@ where info.prev_funding_outpoint == prev_hop_data.outpoint && info.prev_htlc_id == prev_hop_data.htlc_id }; - // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the above - // loop, but we need to prune from those added HTLCs if they were already forwarded to - // the outbound edge. Otherwise, we'll double-forward. - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed + // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from + // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, + // we'll double-forward. + if reconstruct_manager_from_monitors { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); + } - if !is_channel_closed { + if !is_channel_closed || reconstruct_manager_from_monitors { continue; } // The ChannelMonitor is now responsible for this HTLC's @@ -18539,99 +18578,55 @@ where } } - // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. - // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. 
- for (src, _, _, _, _, _) in failed_htlcs.iter() { - if let HTLCSource::PreviousHopData(prev_hop_data) = src { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was failed backwards during manager read", - &args.logger, - ); - } - } - - // See above comment on `failed_htlcs`. - for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } - } - - // Remove HTLCs from `forward_htlcs` if they are also present in `decode_update_add_htlcs`. - // - // In the future, the full set of pending HTLCs will be pulled from `Channel{Monitor}` data and - // placed in `ChannelManager::decode_update_add_htlcs` on read, to be handled on the next call - // to `process_pending_htlc_forwards`. This is part of a larger effort to remove the requirement - // of regularly persisting the `ChannelManager`. The new pipeline is supported for HTLC forwards - // received on LDK 0.3+ but not <= 0.2, so prune non-legacy HTLCs from `forward_htlcs`. - forward_htlcs_legacy.retain(|scid, pending_fwds| { - for fwd in pending_fwds { - let (prev_scid, prev_htlc_id) = match fwd { - HTLCForwardInfo::AddHTLC(htlc) => { - (htlc.prev_outbound_scid_alias, htlc.prev_htlc_id) - }, - HTLCForwardInfo::FailHTLC { htlc_id, .. } - | HTLCForwardInfo::FailMalformedHTLC { htlc_id, .. } => (*scid, *htlc_id), - }; - if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { - if pending_update_adds - .iter() - .any(|update_add| update_add.htlc_id == prev_htlc_id) - { - return false; - } + if reconstruct_manager_from_monitors { + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, _, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); } } - true - }); - // Remove intercepted HTLC forwards if they are also present in `decode_update_add_htlcs`. See - // the above comment. - pending_intercepted_htlcs_legacy.retain(|id, fwd| { - let prev_scid = fwd.prev_outbound_scid_alias; - if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { - if pending_update_adds - .iter() - .any(|update_add| update_add.htlc_id == fwd.prev_htlc_id) - { - pending_events_read.retain( - |(ev, _)| !matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), + + // See above comment on `failed_htlcs`. 
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, ); - return false; } } + } + + let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = + if reconstruct_manager_from_monitors { + (decode_update_add_htlcs, new_hash_map(), new_hash_map()) + } else { + ( + decode_update_add_htlcs_legacy, + forward_htlcs_legacy, + pending_intercepted_htlcs_legacy, + ) + }; + + // If we have a pending intercept HTLC present but no corresponding event, add that now rather + // than relying on the user having persisted the event prior to shutdown. + for (id, fwd) in pending_intercepted_htlcs.iter() { if !pending_events_read.iter().any( |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), ) { - match create_htlc_intercepted_event(*id, &fwd) { + match create_htlc_intercepted_event(*id, fwd) { Ok(ev) => pending_events_read.push_back((ev, None)), Err(()) => debug_assert!(false), } } - true - }); - // Add legacy update_adds that were received on LDK <= 0.2 that are not present in the - // `decode_update_add_htlcs` map that was rebuilt from `Channel{Monitor}` data, see above - // comment. - for (scid, legacy_update_adds) in decode_update_add_htlcs_legacy.drain() { - match decode_update_add_htlcs.entry(scid) { - hash_map::Entry::Occupied(mut update_adds) => { - for legacy_update_add in legacy_update_adds { - if !update_adds.get().contains(&legacy_update_add) { - update_adds.get_mut().push(legacy_update_add); - } - } - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(legacy_update_adds); - }, - } } let best_block = BestBlock::new(best_block_hash, best_block_height); @@ -18660,9 +18655,9 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs), - forward_htlcs: Mutex::new(forward_htlcs_legacy), + forward_htlcs: Mutex::new(forward_htlcs), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, @@ -18998,12 +18993,11 @@ where mod tests { use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; use crate::ln::channelmanager::{ - create_recv_pending_htlc_info, inbound_payment, HTLCForwardInfo, InterceptId, PaymentId, + create_recv_pending_htlc_info, inbound_payment, InterceptId, PaymentId, RecipientOnionFields, }; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; - use crate::ln::onion_utils::AttributionData; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::Retry; use crate::ln::types::ChannelId; @@ -19013,7 +19007,6 @@ mod tests { use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::config::{ChannelConfig, ChannelConfigUpdate}; use crate::util::errors::APIError; - use crate::util::ser::Writeable; use crate::util::test_utils; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; @@ -20071,66 +20064,6 @@ mod tests { check_spends!(txn[0], funding_tx); } } - - #[test] - #[rustfmt::skip] - fn test_malformed_forward_htlcs_ser() { - // Ensure that 
`HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly. - let chanmon_cfg = create_chanmon_cfgs(1); - let node_cfg = create_node_cfgs(1, &chanmon_cfg); - let persister; - let chain_monitor; - let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]); - let deserialized_chanmgr; - let mut nodes = create_network(1, &node_cfg, &chanmgrs); - - let dummy_failed_htlc = |htlc_id| { - HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } } - }; - let dummy_malformed_htlc = |htlc_id| { - HTLCForwardInfo::FailMalformedHTLC { - htlc_id, - failure_code: LocalHTLCFailureReason::InvalidOnionPayload.failure_code(), - sha256_of_onion: [0; 32], - } - }; - - let dummy_htlcs_1: Vec = (1..10).map(|htlc_id| { - if htlc_id % 2 == 0 { - dummy_failed_htlc(htlc_id) - } else { - dummy_malformed_htlc(htlc_id) - } - }).collect(); - - let dummy_htlcs_2: Vec = (1..10).map(|htlc_id| { - if htlc_id % 2 == 1 { - dummy_failed_htlc(htlc_id) - } else { - dummy_malformed_htlc(htlc_id) - } - }).collect(); - - - let (scid_1, scid_2) = (42, 43); - let mut forward_htlcs = new_hash_map(); - forward_htlcs.insert(scid_1, dummy_htlcs_1.clone()); - forward_htlcs.insert(scid_2, dummy_htlcs_2.clone()); - - let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); - *chanmgr_fwd_htlcs = forward_htlcs.clone(); - core::mem::drop(chanmgr_fwd_htlcs); - - reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr); - - let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); - for scid in [scid_1, scid_2].iter() { - let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap(); - assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs); - } - assert!(deserialized_fwd_htlcs.is_empty()); - core::mem::drop(deserialized_fwd_htlcs); - } } #[cfg(ldk_bench)] From cd8007c30965db7b5089215ed24bd71aa937135a Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 5 Jan 2026 16:20:21 -0500 Subject: [PATCH 06/14] Trivial: document some fields on MonitorRestoreUpdates --- lightning/src/ln/channel.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 306d3c9f6ef..e20116e2d14 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1148,11 +1148,14 @@ pub enum UpdateFulfillCommitFetch { /// The return value of `monitor_updating_restored` pub(super) struct MonitorRestoreUpdates { pub raa: Option, + // A `CommitmentUpdate` to be sent to our channel peer. pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + // Inbound update_adds that are now irrevocably committed to this channel and are ready for the + // onion to be processed in order to forward or receive the HTLC. pub pending_update_adds: Vec, pub funding_broadcastable: Option, pub channel_ready: Option, From 5cf8d3d48aaa58ddce99cd24364b03ea18f788f5 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 5 Jan 2026 16:28:00 -0500 Subject: [PATCH 07/14] Prune inbound HTLC onions once forwarded We store inbound committed HTLCs' onions in Channels for use in reconstructing the pending HTLC set on ChannelManager read. 
If an HTLC has been irrevocably forwarded to the outbound edge, we no longer need to persist the inbound edge's onion and can prune it here. --- lightning/src/ln/channel.rs | 33 +++++++++++++++++++++--- lightning/src/ln/channelmanager.rs | 41 ++++++++++++++++++++++++++++++ lightning/src/ln/reload_tests.rs | 20 +++++++++++++++ 3 files changed, 90 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e20116e2d14..4219a23a499 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,8 +50,8 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCSource, - OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, + self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, + HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; @@ -1162,6 +1162,10 @@ pub(super) struct MonitorRestoreUpdates { pub channel_ready_order: ChannelReadyOrder, pub announcement_sigs: Option, pub tx_signatures: Option, + // The sources of outbound HTLCs that were forwarded and irrevocably committed on this channel + // (the outbound edge). Useful to prune data that must be persisted in the inbound edge channel + // until the HTLC is forwarded. + pub committed_outbound_htlc_sources: Vec, } /// The return value of `signer_maybe_unblocked` @@ -7791,6 +7795,19 @@ where .collect() } + /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to + /// persist its onion. 
+ pub(super) fn prune_inbound_htlc_onion(&mut self, htlc_id: u64) { + for htlc in self.context.pending_inbound_htlcs.iter_mut() { + if htlc.htlc_id == htlc_id { + if let InboundHTLCState::Committed { ref mut update_add_htlc_opt } = htlc.state { + update_add_htlc_opt.take(); + return; + } + } + } + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( @@ -9500,6 +9517,14 @@ where mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); let mut pending_update_adds = Vec::new(); mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds); + let committed_outbound_htlc_sources = self.context.pending_outbound_htlcs.iter().filter_map(|htlc| { + if let &OutboundHTLCState::Committed = &htlc.state { + if let HTLCSource::PreviousHopData(prev_hop_data) = &htlc.source { + return Some(prev_hop_data.clone()) + } + } + None + }).collect(); if self.context.channel_state.is_peer_disconnected() { self.context.monitor_pending_revoke_and_ack = false; @@ -9508,7 +9533,7 @@ where raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, + channel_ready_order, committed_outbound_htlc_sources }; } @@ -9539,7 +9564,7 @@ where MonitorRestoreUpdates { raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, + channel_ready_order, committed_outbound_htlc_sources } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2cf522b987a..fffac3228db 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3365,6 +3365,7 @@ macro_rules! handle_monitor_update_completion { decode_update_add_htlcs, updates.finalized_claimed_htlcs, updates.failed_htlcs, + updates.committed_outbound_htlc_sources, ); } }}; @@ -9558,6 +9559,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec, ) { // If the channel belongs to a batch funding transaction, the progress of the batch // should be updated as we have received funding_signed and persisted the monitor. @@ -9623,6 +9625,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } + self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); } fn handle_monitor_update_completion_actions< @@ -9779,6 +9782,44 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + /// We store inbound committed HTLCs' onions in `Channel`s for use in reconstructing the pending + /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound + /// edge, we no longer need to persist the inbound edge's onion and can prune it here. 
+ fn prune_persisted_inbound_htlc_onions( + &self, committed_outbound_htlc_sources: Vec, + ) { + let per_peer_state = self.per_peer_state.read().unwrap(); + for source in committed_outbound_htlc_sources { + let counterparty_node_id = match source.counterparty_node_id.as_ref() { + Some(id) => id, + None => continue, + }; + let mut peer_state = + match per_peer_state.get(counterparty_node_id).map(|state| state.lock().unwrap()) { + Some(peer_state) => peer_state, + None => continue, + }; + + if let Some(chan) = + peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) + { + chan.prune_inbound_htlc_onion(source.htlc_id); + } + } + } + + #[cfg(test)] + /// Useful to check that we prune inbound HTLC onions once they are irrevocably forwarded to the + /// outbound edge, see [`Self::prune_persisted_inbound_htlc_onions`]. + pub(crate) fn test_get_inbound_committed_update_adds_count( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.get_inbound_committed_update_adds().len() + } + /// Handles a channel reentering a functional state, either due to reconnect or a monitor /// update completion. #[rustfmt::skip] diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index a38262e6952..80a3fe06d6d 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1208,6 +1208,13 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + // While an inbound HTLC is committed in a channel but not yet forwarded, we store its onion in + // the `Channel` in case we need to remember it on restart. Once it's irrevocably forwarded to the + // outbound edge, we can prune it on the inbound edge. + assert_eq!( + nodes[1].node.test_get_inbound_committed_update_adds_count(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`. @@ -1229,6 +1236,13 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { args_b_c.send_announcement_sigs = (true, true); reconnect_nodes(args_b_c); + // Before an inbound HTLC is irrevocably forwarded, its onion should still be persisted within the + // inbound edge channel. + assert_eq!( + nodes[1].node.test_get_inbound_committed_update_adds_count(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); + // Forward the HTLC and ensure we can claim it post-reload. nodes[1].node.process_pending_htlc_forwards(); @@ -1251,6 +1265,12 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[2], false); + // After an inbound HTLC is irrevocably forwarded, its onion should be pruned within the inbound + // edge channel. 
assert_eq!(
+		nodes[1].node.test_get_inbound_committed_update_adds_count(nodes[0].node.get_our_node_id(), chan_id_1),
+		0
+	);
 
 	expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
 	let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]];

From 11b3f12deffacc5d0c47d4fe7904bc3e7a71c06a Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Tue, 6 Jan 2026 13:29:12 -0500
Subject: [PATCH 08/14] Pass logger into FundedChannel::read

Used in the next commit when we log on some read errors.
---
 lightning/src/ln/channel.rs        | 17 ++++++++++-------
 lightning/src/ln/channelmanager.rs |  1 +
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 4219a23a499..09685506b1e 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -15146,16 +15146,17 @@ where
 	}
 }
 
-impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)>
-	for FundedChannel<SP>
+impl<'a, 'b, 'c, 'd, ES: Deref, SP: Deref, L: Deref>
+	ReadableArgs<(&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures)> for FundedChannel<SP>
 where
 	ES::Target: EntropySource,
 	SP::Target: SignerProvider,
+	L::Target: Logger,
 {
 	fn read<R: Read>(
-		reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures),
+		reader: &mut R, args: (&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures),
 	) -> Result<Self, DecodeError> {
-		let (entropy_source, signer_provider, our_supported_features) = args;
+		let (entropy_source, signer_provider, logger, our_supported_features) = args;
 		let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 		if ver <= 2 {
 			return Err(DecodeError::UnknownVersion);
@@ -16864,9 +16865,11 @@ mod tests {
 		let mut reader =
 			crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
 		let features = channelmanager::provided_channel_type_features(&config);
-		let decoded_chan =
-			FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features))
-				.unwrap();
+		let decoded_chan = FundedChannel::read(
+			&mut reader,
+			(&&keys_provider, &&keys_provider, &&logger, &features),
+		)
+		.unwrap();
 		assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
 		assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
 	}
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index fffac3228db..b1b35c2411d 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -17276,6 +17276,7 @@ where
 			(
 				&args.entropy_source,
 				&args.signer_provider,
+				&args.logger,
 				&provided_channel_type_features(&args.config),
 			),
 		)?;

From 16d4bbd3ba39cf81432d547c99b91e11de19501a Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Tue, 6 Jan 2026 13:47:27 -0500
Subject: [PATCH 09/14] Remove deprecated InboundHTLCResolution::Resolved variant

Handling of these HTLCs was already effectively deprecated in 0.1 (see
the 0.1 CHANGELOG.md entry), and as of
4f055aca878ae990280d5b467d0e1faac5691aa0 we no longer handle these
deprecated HTLCs properly at all (see the comment in
Channel::revoke_and_ack). Therefore, remove support for them explicitly
here.
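
For context, a minimal sketch of the read-side behavior this implies (with
hypothetical simplified types, not the actual ser macros): the variant number
that encoded `Resolved` stays reserved, and encountering it now fails the read
instead of being silently reinterpreted:

    #[derive(Debug)]
    enum DecodeError {
        InvalidValue,
    }

    enum InboundHTLCResolution {
        Pending { htlc_id: u64 },
    }

    fn read_resolution(variant: u8, htlc_id: u64) -> Result<InboundHTLCResolution, DecodeError> {
        match variant {
            // 0 encoded the removed `Resolved` variant on LDK 0.2 and below; such
            // HTLCs must be resolved before upgrading, so reads now fail loudly.
            0 => Err(DecodeError::InvalidValue),
            2 => Ok(InboundHTLCResolution::Pending { htlc_id }),
            _ => Err(DecodeError::InvalidValue),
        }
    }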
--- lightning/src/ln/channel.rs | 90 ++++++++----------------------------- 1 file changed, 19 insertions(+), 71 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 09685506b1e..fcfca4ae937 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,9 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, - HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, ChannelReadyOrder, FundingConfirmedMessage, HTLCPreviousHopData, HTLCSource, + OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, RAACommitmentOrder, SentHTLCId, + BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -149,12 +148,6 @@ enum InboundHTLCRemovalReason { #[cfg_attr(test, derive(Debug))] #[derive(Clone)] enum InboundHTLCResolution { - /// Resolved implies the action we must take with the inbound HTLC has already been determined, - /// i.e., we already know whether it must be failed back or forwarded. - // - // TODO: Once this variant is removed, we should also clean up - // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable. - Resolved { pending_htlc_status: PendingHTLCStatus }, /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both /// nodes for the state update in which it was proposed. @@ -162,9 +155,7 @@ enum InboundHTLCResolution { } impl_writeable_tlv_based_enum!(InboundHTLCResolution, - (0, Resolved) => { - (0, pending_htlc_status, required), - }, + // 0 used to be used for InboundHTLCResolution::Resolved in 0.2 and below. (2, Pending) => { (0, update_add_htlc, required), }, @@ -301,7 +292,6 @@ impl InboundHTLCState { InboundHTLCResolution::Pending { update_add_htlc } => { update_add_htlc.hold_htlc.is_some() }, - InboundHTLCResolution::Resolved { .. } => false, }, InboundHTLCState::Committed { .. 
} | InboundHTLCState::LocalRemoved(_) => false, } @@ -8731,8 +8721,6 @@ where let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); - let mut update_fail_htlcs = Vec::new(); - let mut update_fail_malformed_htlcs = Vec::new(); let mut static_invoices = Vec::new(); let mut require_commitment = false; let mut value_to_self_msat_diff: i64 = 0; @@ -8807,43 +8795,6 @@ where state { match resolution { - InboundHTLCResolution::Resolved { pending_htlc_status } => { - match pending_htlc_status { - PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailRelay( - msg.clone().into(), - ), - ); - update_fail_htlcs.push(msg) - }, - HTLCFailureMsg::Malformed(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailMalformed { - sha256_of_onion: msg.sha256_of_onion, - failure_code: msg.failure_code, - }, - ); - update_fail_malformed_htlcs.push(msg) - }, - } - }, - PendingHTLCStatus::Forward(forward_info) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); - to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed { - // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were - // received on an old pre-0.0.123 version of LDK. In this case, the HTLC is - // required to be resolved prior to upgrading to 0.1+ per CHANGELOG.md. - update_add_htlc_opt: None, - }; - }, - } - }, InboundHTLCResolution::Pending { update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); pending_update_adds.push(update_add_htlc.clone()); @@ -8994,9 +8945,11 @@ where release_state_str ); if self.context.channel_state.can_generate_new_commitment() { - log_debug!(logger, "Responding with a commitment update with {} HTLCs failed for channel {}", - update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), - &self.context.channel_id); + log_debug!( + logger, + "Responding with a commitment update for channel {}", + &self.context.channel_id + ); } else { debug_assert!(htlcs_to_fail.is_empty()); let reason = if self.context.channel_state.is_local_stfu_sent() { @@ -15191,8 +15144,9 @@ where let counterparty_next_commitment_transaction_number = Readable::read(reader)?; let value_to_self_msat = Readable::read(reader)?; - let pending_inbound_htlc_count: u64 = Readable::read(reader)?; + let logger = WithContext::from(logger, None, Some(channel_id), None); + let pending_inbound_htlc_count: u64 = Readable::read(reader)?; let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min( pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize, @@ -15205,23 +15159,17 @@ where payment_hash: Readable::read(reader)?, state: match ::read(reader)? { 1 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; + let resolution = Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.0.123 or earlier. 
HTLC must be resolved before upgrading to LDK 0.3+, see CHANGELOG.md"); + e + })?; InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) }, 2 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; + let resolution = Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.0.123 or earlier. HTLC must be resolved before upgrading to LDK 0.3+, see CHANGELOG.md"); + e + })?; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, 3 => InboundHTLCState::Committed { update_add_htlc_opt: None }, From 4bb08cc2e58487ee2aaba4ebbfd989d11d1ec8ad Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 6 Jan 2026 15:19:33 -0500 Subject: [PATCH 10/14] Remove now-unused param from monitor_updating_paused In the previous commit, we removed the InboundHTLCResolution::Resolved enum variant, which caused us to never provide any HTLCs in this now-removed parameter. --- lightning/src/ln/channel.rs | 80 ++++--------------------------------- 1 file changed, 7 insertions(+), 73 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index fcfca4ae937..aa532b58f60 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7494,7 +7494,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } @@ -7932,15 +7931,7 @@ where &self.context.channel_id() ); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context .interactive_tx_signing_session .as_mut() @@ -8048,15 +8039,7 @@ where .as_mut() .expect("Signing session must exist for negotiated pending splice") .received_commitment_signed(); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8367,7 +8350,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); @@ -8574,15 +8556,7 @@ where if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); - self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8717,7 +8691,6 @@ where } log_trace!(logger, "Updating HTLCs on receipt of RAA..."); - let mut to_forward_infos = Vec::new(); let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); @@ -8918,7 +8891,6 @@ where false, true, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -8966,7 +8938,6 @@ where false, true, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -8980,7 +8951,6 @@ where false, false, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9386,7 +9356,6 @@ where /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress fn monitor_updating_paused( &mut 
From 4bb08cc2e58487ee2aaba4ebbfd989d11d1ec8ad Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Tue, 6 Jan 2026 15:19:33 -0500
Subject: [PATCH 10/14] Remove now-unused param from monitor_updating_paused

The previous commit removed the InboundHTLCResolution::Resolved enum
variant, after which no caller ever provides any HTLCs in this parameter,
so remove it.
---
 lightning/src/ln/channel.rs | 80 ++++---------------------------------
 1 file changed, 7 insertions(+), 73 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index fcfca4ae937..aa532b58f60 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -7494,7 +7494,6 @@ where
                    false,
                    Vec::new(),
                    Vec::new(),
-                   Vec::new(),
                    logger,
                );
                UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat }
@@ -7932,15 +7931,7 @@ where
                &self.context.channel_id()
            );
 
-           self.monitor_updating_paused(
-               false,
-               false,
-               false,
-               Vec::new(),
-               Vec::new(),
-               Vec::new(),
-               logger,
-           );
+           self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger);
            self.context
                .interactive_tx_signing_session
                .as_mut()
@@ -8048,15 +8039,7 @@ where
            .as_mut()
            .expect("Signing session must exist for negotiated pending splice")
            .received_commitment_signed();
-       self.monitor_updating_paused(
-           false,
-           false,
-           false,
-           Vec::new(),
-           Vec::new(),
-           Vec::new(),
-           logger,
-       );
+       self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger);
        Ok(self.push_ret_blockable_mon_update(monitor_update))
    }
 
@@ -8367,7 +8350,6 @@ where
                false,
                Vec::new(),
                Vec::new(),
-               Vec::new(),
                logger,
            );
            return Ok(self.push_ret_blockable_mon_update(monitor_update));
@@ -8574,15 +8556,7 @@ where
                if update_fee.is_some() { "a fee update, " } else { "" },
                update_add_count, update_fulfill_count, update_fail_count);
 
-           self.monitor_updating_paused(
-               false,
-               true,
-               false,
-               Vec::new(),
-               Vec::new(),
-               Vec::new(),
-               logger,
-           );
+           self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger);
            (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
        } else {
            (None, Vec::new())
@@ -8717,7 +8691,6 @@ where
        }
 
        log_trace!(logger, "Updating HTLCs on receipt of RAA...");
-       let mut to_forward_infos = Vec::new();
        let mut pending_update_adds = Vec::new();
        let mut revoked_htlcs = Vec::new();
        let mut finalized_claimed_htlcs = Vec::new();
@@ -8918,7 +8891,6 @@ where
                    false,
                    true,
                    false,
-                   to_forward_infos,
                    revoked_htlcs,
                    finalized_claimed_htlcs,
                    logger,
@@ -8966,7 +8938,6 @@ where
                    false,
                    true,
                    false,
-                   to_forward_infos,
                    revoked_htlcs,
                    finalized_claimed_htlcs,
                    logger,
@@ -8980,7 +8951,6 @@ where
                    false,
                    false,
                    false,
-                   to_forward_infos,
                    revoked_htlcs,
                    finalized_claimed_htlcs,
                    logger,
@@ -9386,7 +9356,6 @@ where
    /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
    fn monitor_updating_paused(
        &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool,
-       pending_forwards: Vec<(PendingHTLCInfo, u64)>,
        pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L,
    ) where
@@ -9397,7 +9366,6 @@ where
        self.context.monitor_pending_revoke_and_ack |= resend_raa;
        self.context.monitor_pending_commitment_signed |= resend_commitment;
        self.context.monitor_pending_channel_ready |= resend_channel_ready;
-       self.context.monitor_pending_forwards.extend(pending_forwards);
        self.context.monitor_pending_failures.extend(pending_fails);
        self.context.monitor_pending_finalized_fulfills.extend(pending_finalized_claimed_htlcs);
        self.context.channel_state.set_monitor_update_in_progress();
@@ -10564,15 +10532,7 @@ where
                }],
                channel_id: Some(self.context.channel_id()),
            };
-           self.monitor_updating_paused(
-               false,
-               false,
-               false,
-               Vec::new(),
-               Vec::new(),
-               Vec::new(),
-               logger,
-           );
+           self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger);
            self.push_ret_blockable_mon_update(monitor_update)
        } else {
            None
@@ -11329,15 +11289,7 @@ where
                }],
                channel_id: Some(self.context.channel_id()),
            };
-           self.monitor_updating_paused(
-               false,
-               false,
-               false,
-               Vec::new(),
-               Vec::new(),
-               Vec::new(),
-               logger,
-           );
+           self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger);
            let monitor_update = self.push_ret_blockable_mon_update(monitor_update);
 
            let announcement_sigs =
@@ -13061,15 +13013,7 @@ where
        let can_add_htlc = send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))?;
        if can_add_htlc {
            let monitor_update = self.build_commitment_no_status_check(logger);
-           self.monitor_updating_paused(
-               false,
-               true,
-               false,
-               Vec::new(),
-               Vec::new(),
-               Vec::new(),
-               logger,
-           );
+           self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger);
            Ok(self.push_ret_blockable_mon_update(monitor_update))
        } else {
            Ok(None)
@@ -13192,15 +13136,7 @@ where
                }],
                channel_id: Some(self.context.channel_id()),
            };
-           self.monitor_updating_paused(
-               false,
-               false,
-               false,
-               Vec::new(),
-               Vec::new(),
-               Vec::new(),
-               &&logger,
-           );
+           self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), &&logger);
            self.push_ret_blockable_mon_update(monitor_update)
        } else {
            None
@@ -13835,7 +13771,6 @@ where
            need_channel_ready,
            Vec::new(),
            Vec::new(),
-           Vec::new(),
            logger,
        );
        Ok((channel, channel_monitor))
@@ -14157,7 +14092,6 @@ where
            need_channel_ready,
            Vec::new(),
            Vec::new(),
-           Vec::new(),
            logger,
        );
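With the Resolved variant gone, every call site above was passing a literal Vec::new() for pending_forwards. A toy illustration of why deleting an always-empty parameter shrinks both the callee and its callers; the types and field names below are stand-ins, not LDK's real ones:

    struct Context {
        pending_failures: Vec<u64>,
        pending_finalized: Vec<u64>,
    }

    impl Context {
        /// After the refactor, only the queues that can still receive entries
        /// remain parameters; the always-empty forwards Vec (and the Vec::new()
        /// every caller passed for it) is gone.
        fn monitor_updating_paused(&mut self, fails: Vec<u64>, finalized: Vec<u64>) {
            self.pending_failures.extend(fails);
            self.pending_finalized.extend(finalized);
        }
    }

    fn main() {
        let mut ctx = Context { pending_failures: Vec::new(), pending_finalized: Vec::new() };
        // Call sites collapse down to the data that can actually flow.
        ctx.monitor_updating_paused(vec![1], Vec::new());
        assert_eq!(ctx.pending_failures.len(), 1);
    }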
From 37e57dd2fb1d023b0aeef2e2826e50f063b1ae0e Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Tue, 6 Jan 2026 16:17:12 -0500
Subject: [PATCH 11/14] Simplify InboundHTLCStates' htlc resolution type

Previously, several variants of InboundHTLCState contained an
InboundHTLCResolution enum. Now that the resolution enum only has one
variant, we can get rid of it and simplify the parent InboundHTLCState
type.
---
 lightning/src/ln/channel.rs | 134 ++++++++++++++++++++----------------
 1 file changed, 74 insertions(+), 60 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index aa532b58f60..a89081c3d61 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -144,28 +144,11 @@ enum InboundHTLCRemovalReason {
    Fulfill { preimage: PaymentPreimage, attribution_data: Option },
 }
 
-/// Represents the resolution status of an inbound HTLC.
-#[cfg_attr(test, derive(Debug))]
-#[derive(Clone)]
-enum InboundHTLCResolution {
-   /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed
-   /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both
-   /// nodes for the state update in which it was proposed.
-   Pending { update_add_htlc: msgs::UpdateAddHTLC },
-}
-
-impl_writeable_tlv_based_enum!(InboundHTLCResolution,
-   // 0 used to be used for InboundHTLCResolution::Resolved in 0.2 and below.
-   (2, Pending) => {
-       (0, update_add_htlc, required),
-   },
-);
-
 #[cfg_attr(test, derive(Debug))]
 enum InboundHTLCState {
    /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
    /// update_add_htlc message for this HTLC.
-   RemoteAnnounced(InboundHTLCResolution),
+   RemoteAnnounced(msgs::UpdateAddHTLC),
    /// Included in a received commitment_signed message (implying we've
    /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
    /// state (see the example below). We have not yet included this HTLC in a
@@ -195,13 +178,13 @@ enum InboundHTLCState {
    /// Implies AwaitingRemoteRevoke.
    ///
    /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
-   AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution),
+   AwaitingRemoteRevokeToAnnounce(msgs::UpdateAddHTLC),
    /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
    /// We have also included this HTLC in our latest commitment_signed and are now just waiting
    /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
    /// channel (before it can then get forwarded and/or removed).
    /// Implies AwaitingRemoteRevoke.
-   AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
+   AwaitingAnnouncedRemoteRevoke(msgs::UpdateAddHTLC),
    /// An HTLC irrevocably committed in the latest commitment transaction, ready to be forwarded or
    /// removed.
    Committed {
@@ -286,12 +269,10 @@ impl InboundHTLCState {
    /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
    fn should_hold_htlc(&self) -> bool {
        match self {
-           InboundHTLCState::RemoteAnnounced(res)
-           | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res)
-           | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res {
-               InboundHTLCResolution::Pending { update_add_htlc } => {
-                   update_add_htlc.hold_htlc.is_some()
-               },
+           InboundHTLCState::RemoteAnnounced(update_add_htlc)
+           | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc)
+           | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) => {
+               update_add_htlc.hold_htlc.is_some()
            },
            InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false,
        }
@@ -7763,9 +7744,7 @@ where
            amount_msat: msg.amount_msat,
            payment_hash: msg.payment_hash,
            cltv_expiry: msg.cltv_expiry,
-           state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending {
-               update_add_htlc: msg.clone(),
-           }),
+           state: InboundHTLCState::RemoteAnnounced(msg.clone()),
        });
 
        Ok(())
    }
@@ -8244,11 +8223,10 @@ where
        }
 
        for htlc in self.context.pending_inbound_htlcs.iter_mut() {
-           if let &InboundHTLCState::RemoteAnnounced(ref htlc_resolution) = &htlc.state {
+           if let &InboundHTLCState::RemoteAnnounced(ref update_add) = &htlc.state {
                log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id);
-               htlc.state =
-                   InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone());
+               htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add.clone());
                need_commitment = true;
            }
        }
@@ -8760,22 +8738,21 @@ where
                let mut state = InboundHTLCState::Committed { update_add_htlc_opt: None };
                mem::swap(&mut state, &mut htlc.state);
 
-               if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
+               if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add) = state {
                    log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
-                   htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution);
+                   htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add);
                    require_commitment = true;
-               } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) =
+               } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add) =
                    state
                {
-                   match resolution {
-                       InboundHTLCResolution::Pending { update_add_htlc } => {
-                           log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
-                           pending_update_adds.push(update_add_htlc.clone());
-                           htlc.state = InboundHTLCState::Committed {
-                               update_add_htlc_opt: Some(update_add_htlc),
-                           };
-                       },
-                   }
+                   log_trace!(
+                       logger,
+                       " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed",
+                       &htlc.payment_hash
+                   );
+                   pending_update_adds.push(update_add.clone());
+                   htlc.state =
+                       InboundHTLCState::Committed { update_add_htlc_opt: Some(update_add) };
                }
            }
        }
@@ -12778,10 +12755,10 @@ where
        // is acceptable.
        for htlc in self.context.pending_inbound_htlcs.iter_mut() {
            let new_state =
-               if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) =
+               if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add) =
                    &htlc.state
                {
-                   Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
+                   Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add.clone()))
                } else {
                    None
                };
@@ -14553,6 +14530,41 @@ impl Readable for AnnouncementSigsState {
    }
 }
 
+/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here;
+/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s.
+enum WriteableLegacyHTLCResolution<'a> {
+   Pending { update_add_htlc: &'a msgs::UpdateAddHTLC },
+}
+
+// We can't use `impl_writeable_tlv_based_enum` due to the lifetime.
+impl<'a> Writeable for WriteableLegacyHTLCResolution<'a> {
+   fn write(&self, writer: &mut W) -> Result<(), io::Error> {
+       match self {
+           Self::Pending { update_add_htlc } => {
+               2u8.write(writer)?;
+               crate::_encode_varint_length_prefixed_tlv!(writer, {
+                   (0, update_add_htlc, required)
+               });
+           },
+       }
+
+       Ok(())
+   }
+}
+
+/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here;
+/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s.
+enum ReadableLegacyHTLCResolution {
+   Pending { update_add_htlc: msgs::UpdateAddHTLC },
+}
+
+impl_writeable_tlv_based_enum!(ReadableLegacyHTLCResolution,
+   // 0 used to be used for the ::Resolved variant in 0.2 and below.
+   (2, Pending) => {
+       (0, update_add_htlc, required),
+   },
+);
+
 impl Writeable for FundedChannel
 where
    SP::Target: SignerProvider,
@@ -14640,13 +14652,13 @@ where
            htlc.payment_hash.write(writer)?;
            match &htlc.state {
                &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
-               &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
+               &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add_htlc) => {
                    1u8.write(writer)?;
-                   htlc_resolution.write(writer)?;
+                   WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?;
                },
-               &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
+               &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref update_add_htlc) => {
                    2u8.write(writer)?;
-                   htlc_resolution.write(writer)?;
+                   WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?;
                },
                &InboundHTLCState::Committed { ref update_add_htlc_opt } => {
                    3u8.write(writer)?;
@@ -15093,18 +15105,20 @@ where
                payment_hash: Readable::read(reader)?,
                state: match ::read(reader)? {
                    1 => {
-                       let resolution = Readable::read(reader).map_err(|e| {
-                           log_error!(logger, "Found deprecated HTLC received on LDK 0.0.123 or earlier. HTLC must be resolved before upgrading to LDK 0.3+, see CHANGELOG.md");
-                           e
-                       })?;
-                       InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
+                       let ReadableLegacyHTLCResolution::Pending { update_add_htlc } =
+                           Readable::read(reader).map_err(|e| {
+                               log_error!(logger, "Found deprecated HTLC received on LDK 0.0.123 or earlier. HTLC must be resolved before upgrading to LDK 0.3+, see CHANGELOG.md");
+                               e
+                           })?;
+                       InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc)
                    },
                    2 => {
-                       let resolution = Readable::read(reader).map_err(|e| {
-                           log_error!(logger, "Found deprecated HTLC received on LDK 0.0.123 or earlier. HTLC must be resolved before upgrading to LDK 0.3+, see CHANGELOG.md");
-                           e
-                       })?;
-                       InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
+                       let ReadableLegacyHTLCResolution::Pending { update_add_htlc } =
+                           Readable::read(reader).map_err(|e| {
+                               log_error!(logger, "Found deprecated HTLC received on LDK 0.0.123 or earlier. HTLC must be resolved before upgrading to LDK 0.3+, see CHANGELOG.md");
+                               e
+                           })?;
+                       InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc)
                    },
                    3 => InboundHTLCState::Committed { update_add_htlc_opt: None },
                    4 => {
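The type-level effect of the patch above is collapsing a one-variant wrapper enum into its payload, so every consumer loses a layer of matching. A self-contained sketch of the before/after shape, with a toy stand-in for msgs::UpdateAddHTLC (illustrative only):

    #[derive(Clone)]
    struct UpdateAddHtlc { hold_htlc: Option<()> }

    // Before: the state wrapped a single-variant enum, forcing nested matches.
    enum ResolutionBefore { Pending { update_add_htlc: UpdateAddHtlc } }
    enum StateBefore { RemoteAnnounced(ResolutionBefore) }

    // After: the payload is stored directly and pattern matches flatten out.
    enum StateAfter { RemoteAnnounced(UpdateAddHtlc) }

    fn should_hold_before(s: &StateBefore) -> bool {
        match s {
            StateBefore::RemoteAnnounced(ResolutionBefore::Pending { update_add_htlc }) => {
                update_add_htlc.hold_htlc.is_some()
            },
        }
    }

    fn should_hold_after(s: &StateAfter) -> bool {
        match s {
            StateAfter::RemoteAnnounced(update_add_htlc) => update_add_htlc.hold_htlc.is_some(),
        }
    }

    fn main() {
        let msg = UpdateAddHtlc { hold_htlc: None };
        let before = StateBefore::RemoteAnnounced(ResolutionBefore::Pending {
            update_add_htlc: msg.clone(),
        });
        assert_eq!(should_hold_before(&before), should_hold_after(&StateAfter::RemoteAnnounced(msg)));
    }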
From cc5759591d4cf65699b91f72a85e302d1e64a61f Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Tue, 6 Jan 2026 16:31:30 -0500
Subject: [PATCH 12/14] Delete now-unused ChannelContext::monitor_pending_forwards

We stopped adding any HTLCs to this vec a few commits ago when we removed
support for HTLCs that were originally received on LDK 0.0.123 or earlier.
---
 lightning/src/ln/channel.rs | 29 +++++++++--------------------
 1 file changed, 9 insertions(+), 20 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index a89081c3d61..79fba124a3a 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1122,6 +1122,7 @@ pub(super) struct MonitorRestoreUpdates {
    // A `CommitmentUpdate` to be sent to our channel peer.
    pub commitment_update: Option,
    pub commitment_order: RAACommitmentOrder,
+   // TODO: get rid of this
    pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
    pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
    pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>,
@@ -2895,7 +2896,6 @@ where
    // responsible for some of the HTLCs here or not - we don't know whether the update in question
    // completed or not. We currently ignore these fields entirely when force-closing a channel,
    // but need to handle this somehow or we run the risk of losing HTLCs!
-   monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
    monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
    monitor_pending_finalized_fulfills: Vec<(HTLCSource, Option)>,
    monitor_pending_update_adds: Vec,
@@ -3621,7 +3621,6 @@ where
            monitor_pending_channel_ready: false,
            monitor_pending_revoke_and_ack: false,
            monitor_pending_commitment_signed: false,
-           monitor_pending_forwards: Vec::new(),
            monitor_pending_failures: Vec::new(),
            monitor_pending_finalized_fulfills: Vec::new(),
            monitor_pending_update_adds: Vec::new(),
@@ -3860,7 +3859,6 @@ where
            monitor_pending_channel_ready: false,
            monitor_pending_revoke_and_ack: false,
            monitor_pending_commitment_signed: false,
-           monitor_pending_forwards: Vec::new(),
            monitor_pending_failures: Vec::new(),
            monitor_pending_finalized_fulfills: Vec::new(),
            monitor_pending_update_adds: Vec::new(),
@@ -9407,8 +9405,6 @@ where
        let announcement_sigs =
            self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
 
-       let mut accepted_htlcs = Vec::new();
-       mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
        let mut failed_htlcs = Vec::new();
        mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
        let mut finalized_claimed_htlcs = Vec::new();
@@ -9429,7 +9425,7 @@ where
            self.context.monitor_pending_commitment_signed = false;
            return MonitorRestoreUpdates {
                raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst,
-               accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
+               accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
                funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None,
                channel_ready_order, committed_outbound_htlc_sources
            };
@@ -9460,7 +9456,7 @@ where
            if commitment_update.is_some() { "a" } else { "no" },
            if raa.is_some() { "an" } else { "no" },
            match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
        MonitorRestoreUpdates {
-           raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
+           raa, commitment_update, commitment_order, accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs,
            pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None,
            channel_ready_order, committed_outbound_htlc_sources
        }
@@ -14828,11 +14824,8 @@ where
        self.context.monitor_pending_revoke_and_ack.write(writer)?;
        self.context.monitor_pending_commitment_signed.write(writer)?;
 
-       (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
-       for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
-           pending_forward.write(writer)?;
-           htlc_id.write(writer)?;
-       }
+       // Previously used for monitor_pending_forwards prior to LDK 0.3.
+       0u64.write(writer)?;
 
        (self.context.monitor_pending_failures.len() as u64).write(writer)?;
        for &(ref htlc_source, ref payment_hash, ref fail_reason) in
@@ -15250,13 +15243,10 @@ where
        let monitor_pending_revoke_and_ack = Readable::read(reader)?;
        let monitor_pending_commitment_signed = Readable::read(reader)?;
 
-       let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
-       let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(
-           monitor_pending_forwards_count as usize,
-           DEFAULT_MAX_HTLCS as usize,
-       ));
-       for _ in 0..monitor_pending_forwards_count {
-           monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
+       let monitor_pending_forwards_count_legacy: u64 = Readable::read(reader)?;
+       if monitor_pending_forwards_count_legacy != 0 {
+           log_error!(logger, "Found deprecated HTLC received on LDK 0.0.123 or earlier. HTLC must be resolved before upgrading to LDK 0.3+, see CHANGELOG.md");
+           return Err(DecodeError::InvalidValue);
        }
 
@@ -15830,7 +15820,6 @@ where
            monitor_pending_channel_ready,
            monitor_pending_revoke_and_ack,
            monitor_pending_commitment_signed,
-           monitor_pending_forwards,
            monitor_pending_failures,
            monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
            monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or_default(),
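The patch above keeps the serialization format stable while dropping the field: the writer emits a constant zero where the vec length used to go, and the reader rejects any nonzero legacy count. A minimal sketch of that pattern over plain std::io, with big-endian u64s standing in for LDK's actual integer encoding:

    use std::io::{self, Read, Write};

    /// Writes a zero where the legacy collection length used to be serialized,
    /// so older readers still find the field they expect.
    fn write_legacy_len<W: Write>(w: &mut W) -> io::Result<()> {
        w.write_all(&0u64.to_be_bytes())
    }

    /// Reads the legacy length and refuses anything nonzero: data that still
    /// carries entries must be resolved on the old version before upgrading.
    fn read_legacy_len<R: Read>(r: &mut R) -> io::Result<()> {
        let mut buf = [0u8; 8];
        r.read_exact(&mut buf)?;
        if u64::from_be_bytes(buf) != 0 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "deprecated pending forward found; resolve it before upgrading",
            ));
        }
        Ok(())
    }

    fn main() -> io::Result<()> {
        let mut bytes = Vec::new();
        write_legacy_len(&mut bytes)?;
        read_legacy_len(&mut bytes.as_slice())
    }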
From 7f5b830edb055a9cffaf0557f9576cd688bc4ea8 Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Tue, 6 Jan 2026 16:40:27 -0500
Subject: [PATCH 13/14] Delete now-unused MonitorRestoreUpdates::accepted_htlcs

We stopped adding any HTLCs to this vec a few commits ago when we removed
support for HTLCs that were originally received on LDK 0.0.123 or earlier.
---
 lightning/src/ln/channel.rs        | 18 +++++++---------
 lightning/src/ln/channelmanager.rs | 34 ++++++++----------------------
 2 files changed, 17 insertions(+), 35 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 79fba124a3a..0eb6120ba6a 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -51,8 +51,8 @@ use crate::ln::channel_state::{
 };
 use crate::ln::channelmanager::{
    self, ChannelReadyOrder, FundingConfirmedMessage, HTLCPreviousHopData, HTLCSource,
-   OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, RAACommitmentOrder, SentHTLCId,
-   BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA,
+   OpenChannelMessage, PaymentClaimDetails, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT,
+   MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA,
 };
 use crate::ln::funding::{FundingTxInput, SpliceContribution};
 use crate::ln::interactivetxs::{
@@ -1122,8 +1122,6 @@ pub(super) struct MonitorRestoreUpdates {
    // A `CommitmentUpdate` to be sent to our channel peer.
    pub commitment_update: Option,
    pub commitment_order: RAACommitmentOrder,
-   // TODO: get rid of this
-   pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
    pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
    pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>,
    // Inbound update_adds that are now irrevocably committed to this channel and are ready for the
@@ -9425,9 +9423,9 @@ where
            self.context.monitor_pending_commitment_signed = false;
            return MonitorRestoreUpdates {
                raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst,
-               accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
-               funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None,
-               channel_ready_order, committed_outbound_htlc_sources
+               failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable,
+               channel_ready, announcement_sigs, tx_signatures: None, channel_ready_order,
+               committed_outbound_htlc_sources
            };
        }
 
@@ -9456,9 +9454,9 @@ where
            if commitment_update.is_some() { "a" } else { "no" },
            if raa.is_some() { "an" } else { "no" },
            match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
        MonitorRestoreUpdates {
-           raa, commitment_update, commitment_order, accepted_htlcs: Vec::new(), failed_htlcs, finalized_claimed_htlcs,
-           pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None,
-           channel_ready_order, committed_outbound_htlc_sources
+           raa, commitment_update, commitment_order, failed_htlcs, finalized_claimed_htlcs,
+           pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs,
+           tx_signatures: None, channel_ready_order, committed_outbound_htlc_sources
        }
    }
 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index b1b35c2411d..f72e0dec9dc 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3332,13 +3332,12 @@ macro_rules! handle_monitor_update_completion {
                None
            };
 
-           let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
+           let decode_update_add_htlcs = $self.handle_channel_resumption(
                &mut $peer_state.pending_msg_events,
                $chan,
                updates.raa,
                updates.commitment_update,
                updates.commitment_order,
-               updates.accepted_htlcs,
                updates.pending_update_adds,
                updates.funding_broadcastable,
                updates.channel_ready,
@@ -3361,7 +3360,6 @@ macro_rules! handle_monitor_update_completion {
                cp_node_id,
                unbroadcasted_batch_funding_txid,
                update_actions,
-               htlc_forwards,
                decode_update_add_htlcs,
                updates.finalized_claimed_htlcs,
                updates.failed_htlcs,
@@ -9555,7 +9553,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
        &self, channel_id: ChannelId, counterparty_node_id: PublicKey,
        unbroadcasted_batch_funding_txid: Option, update_actions: Vec,
-       htlc_forwards: Option,
        decode_update_add_htlcs: Option<(u64, Vec)>,
        finalized_claimed_htlcs: Vec<(HTLCSource, Option)>,
        failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
@@ -9611,9 +9608,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
        self.handle_monitor_update_completion_actions(update_actions);
 
-       if let Some(forwards) = htlc_forwards {
-           self.forward_htlcs(&mut [forwards][..]);
-       }
        if let Some(decode) = decode_update_add_htlcs {
            self.push_decode_update_add_htlcs(decode);
        }
@@ -9826,17 +9820,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
    fn handle_channel_resumption(&self, pending_msg_events: &mut Vec,
        channel: &mut FundedChannel, raa: Option,
        commitment_update: Option, commitment_order: RAACommitmentOrder,
-       pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec,
-       funding_broadcastable: Option,
+       pending_update_adds: Vec, funding_broadcastable: Option,
        channel_ready: Option, announcement_sigs: Option,
        tx_signatures: Option, tx_abort: Option,
        channel_ready_order: ChannelReadyOrder,
-   ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) {
+   ) -> Option<(u64, Vec)> {
        let logger = WithChannelContext::from(&self.logger, &channel.context, None);
-       log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort",
+       log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort",
            if raa.is_some() { "an" } else { "no" },
            if commitment_update.is_some() { "a" } else { "no" },
-           pending_forwards.len(), pending_update_adds.len(),
+           pending_update_adds.len(),
            if funding_broadcastable.is_some() { "" } else { "not " },
            if channel_ready.is_some() { "sending" } else { "without" },
            if announcement_sigs.is_some() { "sending" } else { "without" },
@@ -9847,14 +9840,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
        let counterparty_node_id = channel.context.get_counterparty_node_id();
        let outbound_scid_alias = channel.context.outbound_scid_alias();
 
-       let mut htlc_forwards = None;
-       if !pending_forwards.is_empty() {
-           htlc_forwards = Some((
-               outbound_scid_alias, channel.context.get_counterparty_node_id(),
-               channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(),
-               channel.context.get_user_id(), pending_forwards
-           ));
-       }
        let mut decode_update_add_htlcs = None;
        if !pending_update_adds.is_empty() {
            decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds));
@@ -9942,7 +9927,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                },
                None => {
                    debug_assert!(false, "Channel resumed without a funding txo, this should never happen!");
-                   return (htlc_forwards, decode_update_add_htlcs);
+                   return decode_update_add_htlcs;
                }
            };
        } else {
@@ -10030,7 +10015,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
            emit_initial_channel_ready_event!(pending_events, channel);
        }
 
-       (htlc_forwards, decode_update_add_htlcs)
+       decode_update_add_htlcs
    }
 
    #[rustfmt::skip]
@@ -12114,12 +12099,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
            }
        }
        let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
-       let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
+       let decode_update_add_htlcs = self.handle_channel_resumption(
            &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.commitment_order,
-           Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs,
+           Vec::new(), None, responses.channel_ready, responses.announcement_sigs,
            responses.tx_signatures, responses.tx_abort, responses.channel_ready_order,
        );
-       debug_assert!(htlc_forwards.is_none());
        debug_assert!(decode_update_add_htlcs.is_none());
        if let Some(upd) = channel_update {
            peer_state.pending_msg_events.push(upd);
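With the forwards half of the tuple permanently None, the patch above narrows handle_channel_resumption's return type to the one payload that can still be produced. A toy sketch of the shape of that change, with u64 standing in for the real LDK message types:

    // Before: -> (Option<Forwards>, Option<(u64, Vec<u64>)>), with the first
    // element always None. After: callers get the single meaningful value.
    fn handle_channel_resumption(pending_update_adds: Vec<u64>, scid: u64) -> Option<(u64, Vec<u64>)> {
        if pending_update_adds.is_empty() {
            None
        } else {
            Some((scid, pending_update_adds))
        }
    }

    fn main() {
        // Call sites drop the tuple destructuring and the debug_assert on the dead half.
        assert!(handle_channel_resumption(Vec::new(), 42).is_none());
        assert!(handle_channel_resumption(vec![1], 42).is_some());
    }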
From e2ea1edce5061e6f86fb51a9ad5b7df82515008f Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Wed, 7 Jan 2026 16:50:37 -0500
Subject: [PATCH 14/14] Delete now-unused PendingHTLCStatus

We stopped using this struct a few commits ago when we removed support
for HTLCs that were originally received on LDK 0.0.123 or earlier.
---
 lightning/src/ln/channelmanager.rs | 30 ------------------------------
 1 file changed, 30 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f72e0dec9dc..2dafadf3540 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -189,23 +189,6 @@ pub use crate::ln::outbound_payment::{
 };
 use crate::ln::script::ShutdownScript;
 
-// We hold various information about HTLC relay in the HTLC objects in Channel itself:
-//
-// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
-// forward the HTLC with information it will give back to us when it does so, or if it should Fail
-// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
-//
-// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
-// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
-// with it to track where it came from (in case of onwards-forward error), waiting a random delay
-// before we forward it.
-//
-// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
-// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
-// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
-// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
-// our payment, which we can use to decode errors or inform the user that the payment was sent.
-
 /// Information about where a received HTLC('s onion) has indicated the HTLC should go.
 #[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug
 #[cfg_attr(test, derive(Debug, PartialEq))]
@@ -435,14 +418,6 @@ pub(super) enum HTLCFailureMsg {
    Malformed(msgs::UpdateFailMalformedHTLC),
 }
 
-/// Stores whether we can't forward an HTLC or relevant forwarding info
-#[cfg_attr(test, derive(Debug))]
-#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug
-pub(super) enum PendingHTLCStatus {
-   Forward(PendingHTLCInfo),
-   Fail(HTLCFailureMsg),
-}
-
 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 pub(super) struct PendingAddHTLCInfo {
    pub(super) forward_info: PendingHTLCInfo,
@@ -16359,11 +16334,6 @@ impl Readable for HTLCFailureMsg {
    }
 }
 
-impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
-   (0, Forward),
-   (1, Fail),
-);
-
 impl_writeable_tlv_based_enum!(BlindedFailure,
    (0, FromIntroductionNode) => {},
    (2, FromBlindedNode) => {},