diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 5a0c37bd61d..b421114e911 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3519,8 +3519,9 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode .node .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed); check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + let (a, raa, holding_cell) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); + assert!(holding_cell.is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors(&nodes[1], 1); @@ -3938,7 +3939,12 @@ fn do_test_durable_preimages_on_closed_channel( let evs = nodes[1].node.get_and_clear_pending_events(); assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 }); for ev in evs { - if let Event::PaymentForwarded { .. } = ev { + if let Event::PaymentForwarded { claim_from_onchain_tx, next_user_channel_id, .. } = ev { + if !claim_from_onchain_tx { + // If the outbound channel is still open, the `next_user_channel_id` should be available. + // This was previously broken. + assert!(next_user_channel_id.is_some()) + } } else { panic!(); } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3236ebdefed..4a0d1175b8e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,10 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, - HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, + HTLCPreviousHopData, HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, + PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, + MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -308,6 +308,32 @@ impl InboundHTLCState { } } +/// Information about the outbound hop for a forwarded HTLC. Useful for generating an accurate +/// [`Event::PaymentForwarded`] if we need to claim this HTLC post-restart. +/// +/// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded +#[derive(Debug, Copy, Clone)] +pub(super) struct OutboundHop { + /// The amount forwarded outbound. + pub(super) amt_msat: u64, + /// The outbound channel this HTLC was forwarded over. + pub(super) channel_id: ChannelId, + /// The next-hop recipient of this HTLC. + pub(super) node_id: PublicKey, + /// The outbound channel's funding outpoint. + pub(super) funding_txo: OutPoint, + /// The outbound channel's user channel ID. + pub(super) user_channel_id: u128, +} + +impl_writeable_tlv_based!(OutboundHop, { + (0, amt_msat, required), + (2, channel_id, required), + (4, node_id, required), + (6, funding_txo, required), + (8, user_channel_id, required), +}); + /// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. 
If /// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to /// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every @@ -315,20 +341,20 @@ impl InboundHTLCState { /// /// Useful for reconstructing the pending HTLC set on startup. #[derive(Debug, Clone)] -pub(super) enum InboundUpdateAdd { +enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing /// its onion to be pruned and no longer persisted. + /// + /// Contains data that is useful if we need to fail or claim this HTLC backwards after a restart + /// and it's missing in the outbound edge. Forwarded { - /// Useful if we need to fail or claim this HTLC backwards after restart, if it's missing in the - /// outbound edge. - hop_data: HTLCPreviousHopData, - /// Useful if we need to claim this HTLC backwards after a restart and it's missing in the - /// outbound edge, to generate an accurate [`Event::PaymentForwarded`]. - /// - /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded - outbound_amt_msat: u64, + incoming_packet_shared_secret: [u8; 32], + phantom_shared_secret: Option<[u8; 32]>, + trampoline_shared_secret: Option<[u8; 32]>, + blinded_failure: Option<BlindedFailure>, + outbound_hop: OutboundHop, }, /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound /// committed HTLCs. @@ -341,8 +367,11 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, }, (2, Legacy) => {}, (4, Forwarded) => { - (0, hop_data, required), - (2, outbound_amt_msat, required), + (0, incoming_packet_shared_secret, required), + (2, outbound_hop, required), + (4, phantom_shared_secret, option), + (6, trampoline_shared_secret, option), + (8, blinded_failure, option), }, ); @@ -7885,10 +7914,35 @@ where Ok(()) } - /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. - pub(super) fn inbound_committed_unresolved_htlcs( + /// Returns true if any committed inbound HTLCs were received pre-LDK 0.3 and cannot be used + /// during `ChannelManager` deserialization to reconstruct the set of pending HTLCs. + pub(super) fn has_legacy_inbound_htlcs(&self) -> bool { + self.context.pending_inbound_htlcs.iter().any(|htlc| { + matches!( + &htlc.state, + InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy } + ) + }) + } + + /// Returns committed inbound HTLCs whose onion has not yet been decoded and processed. Useful + /// for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_htlcs_pending_decode( + &self, + ) -> impl Iterator<Item = msgs::UpdateAddHTLC> + '_ { + self.context.pending_inbound_htlcs.iter().filter_map(|htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + } => Some(update_add_htlc.clone()), + _ => None, + }) + } + + /// Returns committed inbound HTLCs that have been forwarded but not yet fully resolved. Useful + /// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_forwarded_htlcs( &self, - ) -> Vec<(PaymentHash, InboundUpdateAdd)> { + ) -> impl Iterator<Item = (PaymentHash, HTLCPreviousHopData, OutboundHop)> + '_ { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell.
let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7902,19 +7956,45 @@ where }) }; - self.context - .pending_inbound_htlcs - .iter() - .filter_map(|htlc| match &htlc.state { - InboundHTLCState::Committed { update_add_htlc } => { - if htlc_resolution_in_holding_cell(htlc.htlc_id) { - return None; - } - Some((htlc.payment_hash, update_add_htlc.clone())) - }, - _ => None, - }) - .collect() + let prev_outbound_scid_alias = self.context.outbound_scid_alias(); + let user_channel_id = self.context.get_user_id(); + let channel_id = self.context.channel_id(); + let outpoint = self.funding_outpoint(); + let counterparty_node_id = self.context.get_counterparty_node_id(); + + self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: + InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + outbound_hop, + }, + } => { + if htlc_resolution_in_holding_cell(htlc.htlc_id) { + return None; + } + // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards + // post-restart, if it is missing in the outbound edge. + let prev_hop_data = HTLCPreviousHopData { + prev_outbound_scid_alias, + user_channel_id: Some(user_channel_id), + htlc_id: htlc.htlc_id, + incoming_packet_shared_secret: *incoming_packet_shared_secret, + phantom_shared_secret: *phantom_shared_secret, + trampoline_shared_secret: *trampoline_shared_secret, + blinded_failure: *blinded_failure, + channel_id, + outpoint, + counterparty_node_id: Some(counterparty_node_id), + cltv_expiry: Some(htlc.cltv_expiry), + }; + Some((htlc.payment_hash, prev_hop_data, *outbound_hop)) + }, + _ => None, + }) } /// Useful when reconstructing the set of pending HTLC forwards when deserializing the @@ -7961,12 +8041,19 @@ where /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to /// persist its onion. 
pub(super) fn prune_inbound_htlc_onion( - &mut self, htlc_id: u64, hop_data: HTLCPreviousHopData, outbound_amt_msat: u64, + &mut self, htlc_id: u64, prev_hop_data: &HTLCPreviousHopData, + outbound_hop_data: OutboundHop, ) { for htlc in self.context.pending_inbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { - *update_add_htlc = InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }; + *update_add_htlc = InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret: prev_hop_data.incoming_packet_shared_secret, + phantom_shared_secret: prev_hop_data.phantom_shared_secret, + trampoline_shared_secret: prev_hop_data.trampoline_shared_secret, + blinded_failure: prev_hop_data.blinded_failure, + outbound_hop: outbound_hop_data, + }; return; } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e840d705b8e..5a4f569d879 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,7 +59,7 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundHop, OutboundV1Channel, PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, WithChannelContext, }; @@ -1402,6 +1402,8 @@ enum PostMonitorUpdateChanResume { Unblocked { channel_id: ChannelId, counterparty_node_id: PublicKey, + funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option<Txid>, update_actions: Vec<MonitorUpdateCompletionAction>, htlc_forwards: Option<PerSourcePendingForward>, @@ -9582,8 +9584,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// Handles actions which need to complete after a [`ChannelMonitorUpdate`] has been applied /// which can happen after the per-peer state lock has been dropped. fn post_monitor_update_unlock( - &self, channel_id: ChannelId, counterparty_node_id: PublicKey, - unbroadcasted_batch_funding_txid: Option<Txid>, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option<Txid>, update_actions: Vec<MonitorUpdateCompletionAction>, htlc_forwards: Option<PerSourcePendingForward>, decode_update_add_htlcs: Option<(u64, Vec<msgs::UpdateAddHTLC>)>, @@ -9660,7 +9662,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } - self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); + self.prune_persisted_inbound_htlc_onions( + channel_id, + counterparty_node_id, + funding_txo, + user_channel_id, + committed_outbound_htlc_sources, + ); } fn handle_monitor_update_completion_actions< @@ -10129,6 +10137,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id: chan_id, counterparty_node_id, + funding_txo: chan.funding_outpoint(), + user_channel_id: chan.context.get_user_id(), unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -10144,7 +10154,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// HTLC set on `ChannelManager` read.
If an HTLC has been irrevocably forwarded to the outbound /// edge, we no longer need to persist the inbound edge's onion and can prune it here. fn prune_persisted_inbound_htlc_onions( - &self, committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + &self, outbound_channel_id: ChannelId, outbound_node_id: PublicKey, + outbound_funding_txo: OutPoint, outbound_user_channel_id: u128, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { let per_peer_state = self.per_peer_state.read().unwrap(); for (source, outbound_amt_msat) in committed_outbound_htlc_sources { @@ -10161,7 +10173,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) { - chan.prune_inbound_htlc_onion(source.htlc_id, source, outbound_amt_msat); + chan.prune_inbound_htlc_onion( + source.htlc_id, + &source, + OutboundHop { + amt_msat: outbound_amt_msat, + channel_id: outbound_channel_id, + node_id: outbound_node_id, + funding_txo: outbound_funding_txo, + user_channel_id: outbound_user_channel_id, + }, + ); } } } @@ -10185,10 +10207,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); - chan.inbound_committed_unresolved_htlcs() - .iter() - .filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { .. })) - .count() + chan.inbound_htlcs_pending_decode().count() } #[cfg(test)] @@ -10220,6 +10239,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -10231,6 +10252,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.post_monitor_update_unlock( channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -16593,7 +16616,7 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; // // If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and // acts accordingly. -const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 5; +const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 2; impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec), @@ -18587,11 +18610,11 @@ impl< // that it is handled. let mut already_forwarded_htlcs: HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, > = new_hash_map(); let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, >, prev_hop: &HTLCPreviousHopData, payment_hash: &PaymentHash| { @@ -18626,33 +18649,27 @@ impl< if reconstruct_manager_from_monitors { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { + // Legacy HTLCs are from pre-LDK 0.3 and cannot be reconstructed. 
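+ // Their onions were never persisted in the `Channel`, so there is nothing to rebuild the pending-HTLC set from without the legacy manager maps.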
+ if funded_chan.has_legacy_inbound_htlcs() { + return Err(DecodeError::InvalidValue); + } + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. let scid_alias = funded_chan.context.outbound_scid_alias(); - let inbound_committed_update_adds = - funded_chan.inbound_committed_unresolved_htlcs(); - for (payment_hash, htlc) in inbound_committed_update_adds { - match htlc { - InboundUpdateAdd::WithOnion { update_add_htlc } => { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel` as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs - .entry(scid_alias) - .or_insert_with(Vec::new) - .push(update_add_htlc); - }, - InboundUpdateAdd::Forwarded { - hop_data, - outbound_amt_msat, - } => { - already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) - .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); - }, - InboundUpdateAdd::Legacy => { - return Err(DecodeError::InvalidValue) - }, - } + for update_add_htlc in funded_chan.inbound_htlcs_pending_decode() { + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + } + for (payment_hash, prev_hop, next_hop) in + funded_chan.inbound_forwarded_htlcs() + { + already_forwarded_htlcs + .entry((prev_hop.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((prev_hop, next_hop)); } } } @@ -18690,14 +18707,16 @@ impl< } } for (channel_id, monitor) in args.channel_monitors.iter() { - let mut is_channel_closed = true; + let (mut is_channel_closed, mut user_channel_id_opt) = (true, None); let counterparty_node_id = monitor.get_counterparty_node_id(); if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; - is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors && !is_channel_closed { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + is_channel_closed = false; + user_channel_id_opt = Some(chan.context().get_user_id()); + + if reconstruct_manager_from_monitors { if let Some(funded_chan) = chan.as_funded() { for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() { @@ -18997,7 +19016,7 @@ impl< Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id())) + monitor.get_funding_txo(), monitor.channel_id(), user_channel_id_opt)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -19361,33 +19380,33 @@ impl< if let Some(forwarded_htlcs) = already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) { - for (hop_data, outbound_amt_msat) in forwarded_htlcs { + for (prev_hop, next_hop) in forwarded_htlcs { let new_pending_claim = - !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { - matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.channel_id == hop_data.channel_id) + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { - let counterparty_node_id = 
monitor.get_counterparty_node_id(); - let is_channel_closed = channel_manager + let is_downstream_closed = channel_manager .per_peer_state .read() .unwrap() - .get(&counterparty_node_id) + .get(&next_hop.node_id) .map_or(true, |peer_state_mtx| { !peer_state_mtx .lock() .unwrap() .channel_by_id - .contains_key(channel_id) + .contains_key(&next_hop.channel_id) }); pending_claims_to_replay.push(( - HTLCSource::PreviousHopData(hop_data), + HTLCSource::PreviousHopData(prev_hop), payment_preimage, - outbound_amt_msat, - is_channel_closed, - counterparty_node_id, - monitor.get_funding_txo(), - *channel_id, + next_hop.amt_msat, + is_downstream_closed, + next_hop.node_id, + next_hop.funding_txo, + next_hop.channel_id, + Some(next_hop.user_channel_id), )); } } @@ -19657,6 +19676,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, + downstream_user_channel_id, ) in pending_claims_to_replay { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we @@ -19672,7 +19692,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, - None, + downstream_user_channel_id, None, None, ); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index a5461154a02..d3902b26201 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1383,7 +1383,7 @@ macro_rules! _reload_node_inner { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = _reload_node( + $new_channelmanager = $crate::ln::functional_test_utils::_reload_node( &$node, $new_config, &chanman_encoded, @@ -1401,7 +1401,7 @@ macro_rules! reload_node { // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -1414,7 +1414,7 @@ macro_rules! reload_node { }; // Reload the node with the new provided config ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { - _reload_node_inner!( + $crate::_reload_node_inner!( $node, $new_config, $chanman_encoded, @@ -1431,7 +1431,7 @@ macro_rules! reload_node { ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr ) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -2672,20 +2672,23 @@ pub fn commitment_signed_dance_through_cp_raa( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, includes_claim: bool, ) -> Option<MessageSendEvent> { - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(node_a, node_b, fail_backwards); + assert!(node_b_holding_cell_htlcs.is_empty()); node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors(node_a, if includes_claim { 0 } else { 1 }); extra_msg_option } /// Does the main logic in the commitment_signed dance. After the first `commitment_signed` has -/// been delivered, this method picks up and delivers the response `revoke_and_ack` and -/// `commitment_signed`, returning the recipient's `revoke_and_ack` and any extra message it may -/// have included.
+/// been delivered, delivers the response `revoke_and_ack` and `commitment_signed`, and returns: +/// - The recipient's `revoke_and_ack` +/// - The recipient's extra message (if any) after handling the `commitment_signed` +/// - Any messages released from the recipient's holding cell after handling the `revoke_and_ack` +/// (e.g., a second HTLC on the same channel) pub fn do_main_commitment_signed_dance( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, -) -> (Option<MessageSendEvent>, msgs::RevokeAndACK) { +) -> (Option<MessageSendEvent>, msgs::RevokeAndACK, Vec<MessageSendEvent>) { let node_a_id = node_a.node.get_our_node_id(); let node_b_id = node_b.node.get_our_node_id(); @@ -2693,7 +2696,9 @@ pub fn do_main_commitment_signed_dance( check_added_monitors(&node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + // Handling the RAA may release HTLCs from node_b's holding cell (e.g., if multiple HTLCs + // were sent over the same channel and the second was queued behind the first). + let node_b_holding_cell_htlcs = node_b.node.get_and_clear_pending_msg_events(); check_added_monitors(&node_b, 1); node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { @@ -2716,7 +2721,7 @@ pub fn do_main_commitment_signed_dance( assert!(node_a.node.get_and_clear_pending_events().is_empty()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); } - (extra_msg_option, bs_revoke_and_ack) + (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) } /// Runs the commitment_signed dance by delivering the commitment_signed and handling the @@ -2733,9 +2738,10 @@ pub fn commitment_signed_dance_return_raa( .node .handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed); check_added_monitors(&node_a, 1); - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(&node_a, &node_b, fail_backwards); assert!(extra_msg_option.is_none()); + assert!(node_b_holding_cell_htlcs.is_empty()); bs_revoke_and_ack } @@ -2971,7 +2977,7 @@ pub fn check_payment_claimable( #[cfg(any(test, ldk_bench, feature = "_test_utils"))] macro_rules! expect_payment_claimable { ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => { - expect_payment_claimable!( + $crate::expect_payment_claimable!( $node, $expected_payment_hash, $expected_payment_secret, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c7e7175602d..d1e34cb7c71 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1958,14 +1958,8 @@ fn test_reload_node_with_preimage_in_monitor_claims_htlc() { ); // When the claim is reconstructed during reload, a PaymentForwarded event is generated. - // This event has next_user_channel_id as None since the outbound HTLC was already removed. // Fetching events triggers the pending monitor update (adding preimage) to be applied. - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match &events[0] { - Event::PaymentForwarded { total_fee_earned_msat: Some(1000), ..
} => {}, - _ => panic!("Expected PaymentForwarded event"), - } + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors(&nodes[1], 1); // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. @@ -2088,3 +2082,148 @@ fn test_reload_node_without_preimage_fails_htlc() { // nodes[0] should now have received the failure and generate PaymentFailed. expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); } + +#[test] +fn test_reload_with_mpp_claims_on_same_channel() { + // Test that if a forwarding node has two HTLCs for the same MPP payment that were both + // irrevocably removed on the outbound edge via claim but are still forwarded-and-unresolved + // on the inbound edge, both HTLCs will be claimed backwards on restart. + // + // Topology: + // nodes[0] ----chan_0_1----> nodes[1] ----chan_1_2_a----> nodes[2] + // \----chan_1_2_b---/ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 2_000_000, 0); + let chan_1_2_a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + let chan_1_2_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2_a = chan_1_2_a.2; + let chan_id_1_2_b = chan_1_2_b.2; + + // Send an MPP payment large enough that the router must split it across both outbound channels. + // Each 1M sat outbound channel has 100M msat max in-flight, so 150M msat requires splitting. + let amt_msat = 150_000_000; + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + + let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); + nodes[0].node.send_payment_with_route( + route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, + ).unwrap(); + check_added_monitors(&nodes[0], 1); + + // Forward the first HTLC nodes[0] -> nodes[1] -> nodes[2]. Note that the second HTLC is released + // from the holding cell during the first HTLC's commitment_signed_dance. 
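+ // (Both HTLCs traverse chan_0_1 on the inbound side, so the second sits in nodes[0]'s holding cell until nodes[0] handles nodes[1]'s revoke_and_ack.)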
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event_1 = SendEvent::from_event(events.remove(0)); + + nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event_1.msgs[0]); + check_added_monitors(&nodes[1], 0); + nodes[1].node.handle_commitment_signed_batch_test(node_0_id, &payment_event_1.commitment_msg); + check_added_monitors(&nodes[1], 1); + let (_, raa, holding_cell_htlcs) = + do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + assert_eq!(holding_cell_htlcs.len(), 1); + let payment_event_2 = holding_cell_htlcs.into_iter().next().unwrap(); + nodes[1].node.handle_revoke_and_ack(node_0_id, &raa); + check_added_monitors(&nodes[1], 1); + + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev_1_2 = events.remove(0); + pass_along_path( + &nodes[1], &[&nodes[2]], amt_msat, payment_hash, Some(payment_secret), ev_1_2, false, None, + ); + + // Second HTLC: full path nodes[0] -> nodes[1] -> nodes[2]. PaymentClaimable expected at end. + pass_along_path( + &nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), + payment_event_2, true, None, + ); + + // Claim the HTLCs such that they're fully removed from the outbound edge, but disconnect + // node_0<>node_1 so that they can't be claimed backwards by node_1. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 2); + expect_payment_claimed!(nodes[2], payment_hash, amt_msat); + + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + let mut events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + for ev in events { + match ev { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. } => { + assert_eq!(*node_id, node_1_id); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); + }, + _ => panic!("Unexpected event"), + } + } + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + + // Clear the holding cell's claim entries on chan_0_1 before serialization. + // This simulates a crash where both HTLCs were fully removed on the outbound edges but are + // still present on the inbound edge without a resolution. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_a_serialized = get_monitor!(nodes[1], chan_id_1_2_a).encode(); + let mon_1_2_b_serialized = get_monitor!(nodes[1], chan_id_1_2_b).encode(); + + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_a_serialized, &mon_1_2_b_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // When the claims are reconstructed during reload, PaymentForwarded events are regenerated. 
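+ // The `OutboundHop` persisted on the inbound edge supplies the outbound counterparty, funding outpoint, and user channel ID, so the regenerated events match the originals.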
+ let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + // Fetching events triggers the pending monitor updates (one for each HTLC preimage) to be applied. + check_added_monitors(&nodes[1], 2); + + // Reconnect nodes[1] to nodes[0]. Both claims should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 2); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received both fulfills and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +}
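For reviewers skimming the diff, the pruning step at the heart of this change can be summarized with a short, self-contained sketch. The names below (`InboundAdd`, `prune_onion`) are simplified, hypothetical stand-ins rather than LDK's actual types; the sketch only illustrates swapping the large onion payload for the compact record needed to resolve the HTLC backwards after a restart:

```rust
// Hypothetical, simplified stand-ins for `InboundUpdateAdd`/`OutboundHop`.
#[derive(Debug)]
enum InboundAdd {
    // A committed inbound HTLC still carrying its full onion packet.
    WithOnion { onion: Vec<u8> },
    // The forward is irrevocably committed outbound; keep only what is needed
    // to fail or claim backwards (and to rebuild `Event::PaymentForwarded`).
    Forwarded { outbound_amt_msat: u64, outbound_user_channel_id: u128 },
}

fn prune_onion(state: &mut InboundAdd, amt_msat: u64, user_channel_id: u128) {
    // Only transition if the onion is still being persisted.
    if let InboundAdd::WithOnion { .. } = state {
        *state = InboundAdd::Forwarded {
            outbound_amt_msat: amt_msat,
            outbound_user_channel_id: user_channel_id,
        };
    }
}

fn main() {
    // A BOLT 4 onion packet is 1366 bytes; persisting one per committed HTLC
    // on every manager write is the cost the real code avoids.
    let mut htlc = InboundAdd::WithOnion { onion: vec![0u8; 1366] };
    prune_onion(&mut htlc, 150_000_000, 7);
    println!("{:?}", htlc);
}
```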