From 05f428288f07a6c9793aa1e2f4c82d1f1794202c Mon Sep 17 00:00:00 2001 From: okekefrancis112 Date: Thu, 12 Feb 2026 23:44:09 +0100 Subject: [PATCH] feat: prefer outbound SCID alias over real SCID for spliced channels --- lightning/src/ln/channel_state.rs | 6 +- lightning/src/ln/onion_route_tests.rs | 270 +++++++++--- lightning/src/ln/payment_tests.rs | 122 +++--- lightning/src/ln/priv_short_conf_tests.rs | 475 +++++++++++++++++----- lightning/src/routing/router.rs | 2 +- 5 files changed, 664 insertions(+), 211 deletions(-) diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs index d10327b259a..eaae536db5a 100644 --- a/lightning/src/ln/channel_state.rs +++ b/lightning/src/ln/channel_state.rs @@ -501,12 +501,12 @@ impl ChannelDetails { /// This should be used in [`Route`]s to describe the first hop or in other contexts where /// we're sending or forwarding a payment outbound over this channel. /// - /// This is either the [`ChannelDetails::short_channel_id`], if set, or the - /// [`ChannelDetails::outbound_scid_alias`]. See those for more information. + /// This is either the [`ChannelDetails::outbound_scid_alias`], if set, or the + /// [`ChannelDetails::short_channel_id`]. See those for more information. /// /// [`Route`]: crate::routing::router::Route pub fn get_outbound_payment_scid(&self) -> Option<u64> { - self.short_channel_id.or(self.outbound_scid_alias) + self.outbound_scid_alias.or(self.short_channel_id) } /// Gets the funding output for this channel, if available. 
diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index f9b4ab28e88..2eddf90d99e 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -16,8 +16,7 @@ use crate::events::{Event, HTLCHandlingFailureType, PathFailure, PaymentFailureR use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; use crate::ln::channelmanager::{ FailureCode, HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCInfo, - PendingHTLCRouting, RecipientOnionFields, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, - MIN_CLTV_EXPIRY_DELTA, + PendingHTLCRouting, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::functional_test_utils::test_default_channel_config; use crate::ln::msgs; @@ -28,6 +27,7 @@ use crate::ln::msgs::{ use crate::ln::onion_utils::{ self, build_onion_payloads, construct_onion_keys, LocalHTLCFailureReason, }; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::wire::Encode; use crate::routing::gossip::{NetworkUpdate, RoutingFees}; use crate::routing::router::{ @@ -133,7 +133,7 @@ fn run_onion_failure_test_with_fail_intercept( .node .send_payment_with_route(route.clone(), *payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); // temper update_add (0 => 1) let mut update_add_0 = update_0.update_add_htlcs[0].clone(); @@ -170,7 +170,7 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[1]); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert_eq!(update_1.update_add_htlcs.len(), 1); // tamper update_add (1 => 2) let mut update_add_1 = update_1.update_add_htlcs[0].clone(); @@ -202,7 +202,7 @@ fn run_onion_failure_test_with_fail_intercept( }, _ => {}, } - 
check_added_monitors!(&nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_2_1 = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); assert!(update_2_1.update_fail_htlcs.len() == 1); @@ -405,7 +405,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route.clone(), payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -419,7 +419,14 @@ fn test_fee_failures() { // malleated the payment before forwarding, taking funds when they shouldn't have. However, // because we ignore channel update contents, we will still blame the 2nd channel. let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test( "fee_insufficient", 100, @@ -456,7 +463,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route, payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -510,7 +517,14 @@ fn test_onion_failure() { }; // intermediate node failure - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test( "invalid_realm", 0, @@ -552,7 +566,14 @@ fn test_onion_failure() { ); // final node failure - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == 
nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test( "invalid_realm", 3, @@ -633,6 +654,14 @@ fn test_onion_failure() { ); // final node failure + let hop_1_scid = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test_with_fail_intercept( "temporary_node_failure", 200, @@ -664,7 +693,7 @@ fn test_onion_failure() { node_id: route.paths[0].hops[1].pubkey, is_permanent: false, }), - Some(route.paths[0].hops[1].short_channel_id), + Some(hop_1_scid), None, ); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); @@ -735,7 +764,16 @@ fn test_onion_failure() { node_id: route.paths[0].hops[1].pubkey, is_permanent: true, }), - Some(route.paths[0].hops[1].short_channel_id), + Some( + nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), + ), None, ); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); @@ -808,14 +846,23 @@ fn test_onion_failure() { node_id: route.paths[0].hops[1].pubkey, is_permanent: true, }), - Some(route.paths[0].hops[1].short_channel_id), + Some( + nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), + ), None, ); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // Our immediate peer sent UpdateFailMalformedHTLC because it couldn't understand the onion in // the UpdateAddHTLC that we sent. 
- let short_channel_id = channels[0].0.contents.short_channel_id; + let short_channel_id = route.paths[0].hops[0].short_channel_id; run_onion_failure_test( "invalid_onion_version", 0, @@ -870,7 +917,14 @@ fn test_onion_failure() { Some(HTLCHandlingFailureType::InvalidOnion), ); - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); let chan_update = ChannelUpdate::dummy(short_channel_id); let mut err_data = Vec::new(); @@ -941,7 +995,14 @@ fn test_onion_failure() { Some(next_hop_failure.clone()), ); - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test_with_fail_intercept( "permanent_channel_failure", 100, @@ -974,7 +1035,14 @@ fn test_onion_failure() { Some(next_hop_failure.clone()), ); - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test_with_fail_intercept( "required_channel_feature_missing", 100, @@ -1008,8 +1076,16 @@ fn test_onion_failure() { ); let mut bogus_route = route.clone(); - bogus_route.paths[0].hops[1].short_channel_id -= 1; - let short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap() + - 1; + bogus_route.paths[0].hops[1].short_channel_id = short_channel_id; run_onion_failure_test( "unknown_next_peer", 100, @@ -1026,7 
+1102,14 @@ fn test_onion_failure() { Some(HTLCHandlingFailureType::InvalidForward { requested_forward_scid: short_channel_id }), ); - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); let amt_to_forward = { let (per_peer_state, mut peer_state); let chan = get_channel_ref!(nodes[1], nodes[2], per_peer_state, peer_state, channels[1].2); @@ -1064,7 +1147,14 @@ fn test_onion_failure() { // We ignore channel update contents in onion errors, so will blame the 2nd channel even though // the first node is the one that messed up. - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test( "fee_insufficient", 100, @@ -1083,7 +1173,14 @@ fn test_onion_failure() { Some(next_hop_failure.clone()), ); - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test( "incorrect_cltv_expiry", 100, @@ -1103,7 +1200,14 @@ fn test_onion_failure() { Some(next_hop_failure.clone()), ); - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test( "expiry_too_soon", 100, @@ -1190,7 +1294,16 @@ fn test_onion_failure() { true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, - Some(channels[1].0.contents.short_channel_id), + Some( + nodes[1] + .node 
+ .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), + ), Some(HTLCHandlingFailureType::Receive { payment_hash }), ); @@ -1220,11 +1333,27 @@ fn test_onion_failure() { true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, - Some(channels[1].0.contents.short_channel_id), + Some( + nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), + ), Some(HTLCHandlingFailureType::Receive { payment_hash }), ); - let short_channel_id = channels[1].0.contents.short_channel_id; + let short_channel_id = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(); run_onion_failure_test( "channel_disabled", 100, @@ -1386,7 +1515,16 @@ fn test_onion_failure() { node_id: route.paths[0].hops[1].pubkey, is_permanent: true, }), - Some(channels[1].0.contents.short_channel_id), + Some( + nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), + ), None, ); @@ -1455,10 +1593,26 @@ fn test_onion_failure() { true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { - short_channel_id: channels[1].0.contents.short_channel_id, + short_channel_id: nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), is_permanent: false, }), - Some(channels[1].0.contents.short_channel_id), + Some( + nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), + ), Some(next_hop_failure.clone()), ); run_onion_failure_test_with_fail_intercept( @@ -1505,10 
+1659,26 @@ fn test_onion_failure() { true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { - short_channel_id: channels[1].0.contents.short_channel_id, + short_channel_id: nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), is_permanent: false, }), - Some(channels[1].0.contents.short_channel_id), + Some( + nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.counterparty.node_id == nodes[2].node.get_our_node_id()) + .unwrap() + .short_channel_id + .unwrap(), + ), None, ); run_onion_failure_test( @@ -1548,7 +1718,7 @@ fn test_overshoot_final_cltv() { .send_payment_with_route(route, payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add_0 = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_0); @@ -1567,7 +1737,7 @@ fn test_overshoot_final_cltv() { } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); @@ -2285,7 +2455,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -2300,7 +2470,7 @@ fn 
do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { &nodes[1], &[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2435,7 +2605,7 @@ fn test_phantom_onion_hmac_failure() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2470,7 +2640,7 @@ fn test_phantom_onion_hmac_failure() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2508,7 +2678,7 @@ fn test_phantom_invalid_onion_payload() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2571,7 +2741,7 @@ fn test_phantom_invalid_onion_payload() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2607,7 
+2777,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2637,7 +2807,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2676,7 +2846,7 @@ fn test_phantom_failure_too_low_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2691,7 +2861,7 @@ fn test_phantom_failure_too_low_cltv() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2729,7 +2899,7 @@ fn test_phantom_failure_modified_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], 
&nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2784,7 +2954,7 @@ fn test_phantom_failure_expires_too_soon() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2834,7 +3004,7 @@ fn test_phantom_failure_too_low_recv_amt() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2851,7 +3021,7 @@ fn test_phantom_failure_too_low_recv_amt() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2904,7 +3074,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2954,7 +3124,7 @@ fn test_phantom_failure_reject_payment() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let 
update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2981,7 +3151,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 6c982738a52..06f1583da1d 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -245,12 +245,18 @@ fn mpp_retry_overpay() { get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, amt_msat, max_fee); // Check we overpay on the second path which we're about to fail. + // Find which path goes through each node (paths may be in any order). 
+ let (path_to_b_idx, path_to_c_idx) = + if route.paths[0].hops[0].pubkey == node_b_id { (0, 1) } else { (1, 0) }; + assert_eq!(chan_1_update.contents.fee_proportional_millionths, 0); - let overpaid_amount_1 = route.paths[0].fee_msat() as u32 - chan_1_update.contents.fee_base_msat; + let overpaid_amount_1 = + route.paths[path_to_b_idx].fee_msat() as u32 - chan_1_update.contents.fee_base_msat; assert_eq!(overpaid_amount_1, 0); assert_eq!(chan_2_update.contents.fee_proportional_millionths, 0); - let overpaid_amount_2 = route.paths[1].fee_msat() as u32 - chan_2_update.contents.fee_base_msat; + let overpaid_amount_2 = + route.paths[path_to_c_idx].fee_msat() as u32 - chan_2_update.contents.fee_base_msat; let total_overpaid_amount = overpaid_amount_1 + overpaid_amount_2; @@ -299,11 +305,12 @@ fn mpp_retry_overpay() { send_payment(&nodes[3], &[&nodes[2]], 38_000_000); // Retry the second half of the payment and make sure it succeeds. - let first_path_value = route.paths[0].final_value_msat(); - assert_eq!(first_path_value, 36_000_000); + // The path to node B (path_to_b_idx) succeeded with 36M; remove it so we retry only the C path. 
+ let succeeded_path_value = route.paths[path_to_b_idx].final_value_msat(); + assert_eq!(succeeded_path_value, 36_000_000); - route.paths.remove(0); - route_params.final_value_msat -= first_path_value; + route.paths.remove(path_to_b_idx); + route_params.final_value_msat -= succeeded_path_value; let chan_4_scid = chan_4_update.contents.short_channel_id; route_params.payment_params.previously_failed_channels.push(chan_4_scid); // Check the remaining max total routing fee for the second attempt accounts only for 1_000 msat @@ -1779,13 +1786,32 @@ fn preflight_probes_yield_event() { .unwrap(); let recv_value = 50_000_000; - let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); - let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); + let route_params = + RouteParameters::from_payment_params_and_value(payment_params.clone(), recv_value); + + // Compute the route first to determine path ordering (probe results follow route path order). 
+ let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let usable_channels = nodes[0].node.list_usable_channels(); + let first_hops = usable_channels.iter().collect::<Vec<_>>(); + let inflight_htlcs = nodes[0].node.compute_inflight_htlcs(); + let route = nodes[0] + .router + .router + .find_route(&node_0_id, &route_params, Some(&first_hops), inflight_htlcs) + .unwrap(); - let expected_route: &[(&[&Node], PaymentHash)] = - &[(&[&nodes[1], &nodes[3]], res[0].0), (&[&nodes[2], &nodes[3]], res[1].0)]; + // Determine which path index goes to node 1 vs node 2 + let (path_to_1_idx, path_to_2_idx) = + if route.paths[0].hops[0].pubkey == node_1_id { (0, 1) } else { (1, 0) }; - assert_eq!(res.len(), expected_route.len()); + let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); + assert_eq!(res.len(), 2); + + let expected_route: &[(&[&Node], PaymentHash)] = &[ + (&[&nodes[1], &nodes[3]], res[path_to_1_idx].0), + (&[&nodes[2], &nodes[3]], res[path_to_2_idx].0), + ]; send_probe_along_route(&nodes[0], expected_route); @@ -2027,29 +2053,40 @@ fn test_trivial_inflight_htlc_tracking() { // Send and claim the payment. Inflight HTLCs should be empty. 
let (_, payment_hash, _, payment_id) = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); - { - let mut per_peer_lock; - let mut peer_state_lock; - let channel_1 = - get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); + // Inflight HTLCs are tracked by the SCID used in routes: + // - First hop uses alias via get_outbound_payment_scid() + // - Subsequent hops use the real SCID from gossip/network graph + let chan_1_scid = nodes[0] + .node + .list_channels() + .iter() + .find(|c| c.channel_id == chan_1_id) + .unwrap() + .get_outbound_payment_scid() + .unwrap(); + let chan_2_scid = nodes[1] + .node + .list_channels() + .iter() + .find(|c| c.channel_id == chan_2_id) + .unwrap() + .short_channel_id + .unwrap(); + + { let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), - channel_1.funding().get_short_channel_id().unwrap(), + chan_1_scid, ); assert_eq!(chan_1_used_liquidity, None); } { - let mut per_peer_lock; - let mut peer_state_lock; - let channel_2 = - get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); - let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_b_id), &NodeId::from_pubkey(&node_c_id), - channel_2.funding().get_short_channel_id().unwrap(), + chan_2_scid, ); assert_eq!(chan_2_used_liquidity, None); @@ -2069,29 +2106,19 @@ fn test_trivial_inflight_htlc_tracking() { route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut per_peer_lock; - let mut peer_state_lock; - let channel_1 = - get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); - let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), - 
channel_1.funding().get_short_channel_id().unwrap(), + chan_1_scid, ); // First hop accounts for expected 1000 msat fee assert_eq!(chan_1_used_liquidity, Some(501000)); } { - let mut per_peer_lock; - let mut peer_state_lock; - let channel_2 = - get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); - let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_b_id), &NodeId::from_pubkey(&node_c_id), - channel_2.funding().get_short_channel_id().unwrap(), + chan_2_scid, ); assert_eq!(chan_2_used_liquidity, Some(500000)); @@ -2111,28 +2138,18 @@ fn test_trivial_inflight_htlc_tracking() { let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut per_peer_lock; - let mut peer_state_lock; - let channel_1 = - get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); - let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), - channel_1.funding().get_short_channel_id().unwrap(), + chan_1_scid, ); assert_eq!(chan_1_used_liquidity, None); } { - let mut per_peer_lock; - let mut peer_state_lock; - let channel_2 = - get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); - let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_b_id), &NodeId::from_pubkey(&node_c_id), - channel_2.funding().get_short_channel_id().unwrap(), + chan_2_scid, ); assert_eq!(chan_2_used_liquidity, None); } @@ -2174,15 +2191,16 @@ fn test_holding_cell_inflight_htlcs() { let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut per_peer_lock; - let mut peer_state_lock; - let channel = - get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, channel_id); + // Inflight HTLCs are tracked by the SCID used in routes, which is now the alias. + // Use the route's SCID (from get_outbound_payment_scid) to query inflight liquidity. 
+ let chan_details = + nodes[0].node.list_channels().into_iter().find(|c| c.channel_id == channel_id).unwrap(); + let route_scid = chan_details.get_outbound_payment_scid().unwrap(); let used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), - channel.funding().get_short_channel_id().unwrap(), + route_scid, ); assert_eq!(used_liquidity, Some(2000000)); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ea34e88f619..5b5f6051399 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -12,13 +12,15 @@ //! LSP). use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentFailureReason}; +use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; +use crate::ln::channelmanager::{PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::msgs; use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent, RoutingMessageHandler, }; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::routing::gossip::RoutingFees; use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop}; @@ -82,7 +84,7 @@ fn test_priv_forwarding_rejection() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, 
&payment_event.msgs[0]); @@ -165,7 +167,7 @@ fn test_priv_forwarding_rejection() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -349,7 +351,7 @@ fn test_routed_scid_alias() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret); @@ -446,7 +448,7 @@ fn test_scid_privacy_negotiation() { .as_ref() .unwrap() .supports_scid_privacy()); - nodes[1].node.handle_open_channel(node_a_id, &second_open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &second_open_channel); nodes[0].node.handle_accept_channel( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), @@ -499,7 +501,7 @@ fn test_inbound_scid_privacy() { assert!(open_channel.common_fields.channel_type.as_ref().unwrap().requires_scid_privacy()); - nodes[2].node.handle_open_channel(node_b_id, &open_channel); + handle_and_accept_open_channel(&nodes[2], node_b_id, &open_channel); let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, node_b_id); nodes[1].node.handle_accept_channel(node_c_id, &accept_channel); @@ -513,7 +515,7 @@ fn test_inbound_scid_privacy() { node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_c_id), ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_funding_signed = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_b_id); @@ -521,7 +523,7 @@ fn test_inbound_scid_privacy() { 
nodes[1].node.handle_funding_signed(node_c_id, &cs_funding_signed); expect_channel_pending_event(&nodes[1], &node_c_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1); @@ -579,7 +581,7 @@ fn test_inbound_scid_privacy() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); @@ -600,7 +602,7 @@ fn test_inbound_scid_privacy() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route_2, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_node(&nodes[0]); assert_eq!(node_b_id, payment_event.node_id); @@ -697,7 +699,7 @@ fn test_scid_alias_returned() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true); @@ -709,7 +711,7 @@ fn test_scid_alias_returned() { channel_id: chan.0.channel_id, }]; expect_htlc_failure_conditions(events, &expected_failures); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); 
nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); @@ -734,7 +736,7 @@ fn test_scid_alias_returned() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true); @@ -779,7 +781,6 @@ fn test_simple_0conf_channel() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -798,7 +799,6 @@ fn test_0conf_channel_with_async_monitor() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(chan_config.clone()), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -844,7 +844,7 @@ fn test_0conf_channel_with_async_monitor() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_funding_created(node_a_id, &funding_created); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let channel_id = ChannelId::v1_from_funding_outpoint(funding_output); @@ -859,7 +859,7 @@ fn test_0conf_channel_with_async_monitor() { MessageSendEvent::SendFundingSigned { node_id, msg } => { assert_eq!(*node_id, node_a_id); 
nodes[0].node.handle_funding_signed(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -937,26 +937,26 @@ fn test_0conf_channel_with_async_monitor() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_send = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &as_send.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_send.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_raa, bs_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -970,10 +970,10 @@ fn test_0conf_channel_with_async_monitor() { .chain_monitor .channel_monitor_updated(bs_raa.channel_id, latest_update) .unwrap(); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 
1); let bs_send = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &bs_send.msgs[0]); @@ -995,7 +995,6 @@ fn test_0conf_close_no_early_chan_update() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1010,7 +1009,7 @@ fn test_0conf_close_no_early_chan_update() { send_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[0].node.force_close_all_channels_broadcasting_latest_txn(message.clone()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); let _ = get_err_msg(&nodes[0], &node_b_id); @@ -1022,7 +1021,6 @@ fn test_public_0conf_channel() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1078,66 +1076,242 @@ fn test_public_0conf_channel() { #[test] fn test_0conf_channel_reorg() { // If we accept a 0conf channel, which is then confirmed, but then changes SCID in a reorg, we - // have to make sure we handle this correctly (or, currently, just force-close the channel). + // have to ensure we still accept relays to the previous SCID, at least for some time, as well + // as send a fresh channel announcement. 
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let node_a_id = nodes[0].node.get_our_node_id(); + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[None, None, Some(chan_config.clone())]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_chan_between_nodes(&nodes[0], &nodes[1]); + + // Make sure all nodes are at the same starting height + connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); // This is the default but we force it on anyway chan_config.channel_handshake_config.announce_for_forwarding = true; - let (tx, ..) = open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config)); + let (tx, ..) = open_zero_conf_channel(&nodes[1], &nodes[2], Some(chan_config)); // We can use the channel immediately, but we can't announce it until we get 6+ confirmations - send_payment(&nodes[0], &[&nodes[1]], 100_000); + send_payment(&nodes[1], &[&nodes[2]], 100_000); - mine_transaction(&nodes[0], &tx); mine_transaction(&nodes[1], &tx); + mine_transaction(&nodes[2], &tx); // Send a payment using the channel's real SCID, which will be public in a few blocks once we // can generate a channel_announcement. 
- let real_scid = nodes[0].node.list_usable_channels()[0].short_channel_id.unwrap(); - assert_eq!(nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(), real_scid); + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + let original_scid = bs_chan.short_channel_id.unwrap(); + let outbound_scid = bs_chan.get_outbound_payment_scid().unwrap(); + assert_eq!(nodes[2].node.list_usable_channels()[0].short_channel_id.unwrap(), original_scid); let (mut route, payment_hash, payment_preimage, payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); - assert_eq!(route.paths[0].hops[0].short_channel_id, real_scid); + get_route_and_payment_hash!(nodes[1], nodes[2], 10_000); + assert_eq!(route.paths[0].hops[0].short_channel_id, outbound_scid); + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + // Check that we can forward a payment over the channel's SCID as well (i.e. as if node C + // generated an invoice with a route hint through the 0-conf channel). + let mut forwarded_route = route.clone(); + let (ab_route, ..) 
= get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); + forwarded_route.paths[0].hops.insert(0, ab_route.paths[0].hops[0].clone()); + forwarded_route.paths[0].hops[0].fee_msat = 1000; + forwarded_route.paths[0].hops[0].cltv_expiry_delta = MIN_CLTV_EXPIRY_DELTA.into(); send_along_route_with_secret( &nodes[0], - route, - &[&[&nodes[1]]], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], 10_000, payment_hash, payment_secret, ); - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - disconnect_blocks(&nodes[0], 1); + // Now disconnect blocks, checking that the SCID was wiped but that it still works both for a + // forwarded HTLC and a directly-sent one. disconnect_blocks(&nodes[1], 1); + disconnect_blocks(&nodes[2], 1); - // At this point the channel no longer has an SCID again. In the future we should likely - // support simply un-setting the SCID and waiting until the channel gets re-confirmed, but for - // now we force-close the channel here. - let reason = ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." 
- .to_owned(), - }; - check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); - check_closed_broadcast!(nodes[0], true); + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert!(bs_chan.short_channel_id.is_none()); + assert!(nodes[2].node.list_usable_channels()[0].short_channel_id.is_none()); + + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + // Finally, connect an extra block then re-mine the funding tx, giving the channel a new SCID. + connect_blocks(&nodes[1], 1); + connect_blocks(&nodes[2], 1); + + mine_transaction(&nodes[1], &tx); + mine_transaction(&nodes[2], &tx); + + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + let new_scid = bs_chan.short_channel_id.unwrap(); + assert_ne!(original_scid, new_scid); + assert_eq!(nodes[2].node.list_usable_channels()[0].short_channel_id.unwrap(), new_scid); + + // At this point, the channel should happily forward or send payments with either the old SCID + // or the new SCID... 
+ send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let mut new_scid_route = route.clone(); + new_scid_route.paths[0].hops[0].short_channel_id = new_scid; + send_along_route_with_secret( + &nodes[1], + new_scid_route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + let mut new_scid_forwarded_route = forwarded_route.clone(); + new_scid_forwarded_route.paths[0].hops[1].short_channel_id = new_scid; + send_along_route_with_secret( + &nodes[0], + new_scid_forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + // However after CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY blocks, the old SCID should be removed + // and will no longer work for sent or forwarded payments (but the new one still will). + connect_blocks(&nodes[1], 5); + let bs_announcement_sigs = + get_event_msg!(nodes[1], MessageSendEvent::SendAnnouncementSignatures, node_c_id); + + connect_blocks(&nodes[2], 5); + let cs_announcement_sigs = + get_event_msg!(nodes[2], MessageSendEvent::SendAnnouncementSignatures, node_b_id); + + nodes[2].node.handle_announcement_signatures(node_b_id, &bs_announcement_sigs); + let cs_broadcast = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(cs_broadcast.len(), 1); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. 
} = cs_broadcast[0] { + } else { + panic!("Expected broadcast"); + } + + nodes[1].node.handle_announcement_signatures(node_c_id, &cs_announcement_sigs); + let bs_broadcast = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_broadcast.len(), 1); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = bs_broadcast[0] { + } else { + panic!("Expected broadcast"); + } + + connect_blocks(&nodes[0], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY); + connect_blocks(&nodes[1], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 5); + connect_blocks(&nodes[2], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 5); + + send_along_route_with_secret( + &nodes[1], + new_scid_route, + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + new_scid_forwarded_route, + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId([0; 32]); + // Use the original (real) SCID in the route to test that it no longer works after the + // channel announcement propagation delay. 
+ let mut old_scid_route = route.clone(); + old_scid_route.paths[0].hops[0].short_channel_id = original_scid; + nodes[1].node.send_payment_with_route(old_scid_route, payment_hash, onion.clone(), id).unwrap(); + let mut conditions = PaymentFailedConditions::new(); + conditions.reason = Some(PaymentFailureReason::RouteNotFound); + expect_payment_failed_conditions(&nodes[1], payment_hash, false, conditions); + + let mut old_scid_forwarded_route = forwarded_route.clone(); + old_scid_forwarded_route.paths[0].hops[1].short_channel_id = original_scid; + nodes[0] + .node + .send_payment_with_route(old_scid_forwarded_route, payment_hash, onion, id) + .unwrap(); check_added_monitors(&nodes[0], 1); - let reason = ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." - .to_owned(), - }; - check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); - check_closed_broadcast!(nodes[1], true); - check_added_monitors(&nodes[1], 1); + let mut ev = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(ev.len(), 1); + let ev = ev.pop().unwrap(); + let path = &[&nodes[1]]; + let failure = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: original_scid }; + let args = + PassAlongPathArgs::new(&nodes[0], path, 10_000, payment_hash, ev).expect_failure(failure); + do_pass_along_path(args); + fail_payment_along_path(&[&nodes[0], &nodes[1]]); + expect_payment_failed!(nodes[0], payment_hash, false); } #[test] @@ -1145,7 +1319,7 @@ fn test_zero_conf_accept_reject() { let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); channel_type_features.set_zero_conf_required(); - // 1. 
Check we reject zero conf channels by default + // Check we can accept zero conf channels via the right method let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -1153,6 +1327,7 @@ fn test_zero_conf_accept_reject() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); + // 1. First try the non-0conf method to manually accept nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); @@ -1161,41 +1336,6 @@ fn test_zero_conf_accept_reject() { nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); - match msg_events[0] { - MessageSendEvent::HandleError { - action: ErrorAction::SendErrorMessage { ref msg, .. }, - .. - } => { - assert_eq!(msg.data, "No zero confirmation channels accepted".to_owned()); - }, - _ => panic!(), - } - - // 2. 
Check we can manually accept zero conf channels via the right method - let mut manually_accept_conf = UserConfig::default(); - manually_accept_conf.manually_accept_inbound_channels = true; - - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - // 2.1 First try the non-0conf method to manually accept - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf.clone())) - .unwrap(); - let mut open_channel_msg = - get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - - open_channel_msg.common_fields.channel_type = Some(channel_type_features.clone()); - - nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); - // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`. assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1223,11 +1363,8 @@ fn test_zero_conf_accept_reject() { _ => panic!(), } - // 2.2 Try again with the 0conf method to manually accept - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) - .unwrap(); + // 2. 
Try again with the 0conf method to manually accept + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); @@ -1270,7 +1407,6 @@ fn test_connect_before_funding() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut manually_accept_conf = test_default_channel_config(); - manually_accept_conf.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1323,7 +1459,6 @@ fn test_0conf_ann_sigs_racing_conf() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1352,3 +1487,133 @@ fn test_0conf_ann_sigs_racing_conf() { let as_announcement = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_announcement.len(), 1); } + +#[test] +fn test_channel_update_dont_forward_flag() { + // Test that the `dont_forward` bit (bit 1 of message_flags) is set correctly: + // - For private channels: message_flags should have bit 1 set (value 3 = must_be_one + dont_forward) + // - For public channels: message_flags should NOT have bit 1 set (value 1 = must_be_one only) + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + // Create a public (announced) channel between nodes[0] and nodes[1] + 
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); + + // Create a private (unannounced) channel between nodes[1] and nodes[2] + create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000); + + // Get the channel details for both channels + let public_channel = nodes[0] + .node + .list_channels() + .into_iter() + .find(|c| c.counterparty.node_id == node_b_id) + .unwrap(); + let private_channel = nodes[1] + .node + .list_channels() + .into_iter() + .find(|c| c.counterparty.node_id == node_c_id) + .unwrap(); + + // Verify is_announced correctly reflects the channel type + assert!(public_channel.is_announced, "Public channel should have is_announced = true"); + assert!(!private_channel.is_announced, "Private channel should have is_announced = false"); + + // Trigger channel_update by changing config on the public channel + let mut new_config = public_channel.config.unwrap(); + new_config.forwarding_fee_base_msat += 10; + nodes[0] + .node + .update_channel_config(&node_b_id, &[public_channel.channel_id], &new_config) + .unwrap(); + + // Get the channel_update for the public channel and verify dont_forward is NOT set + let events = nodes[0].node.get_and_clear_pending_msg_events(); + let public_channel_update = events + .iter() + .find_map(|e| { + if let MessageSendEvent::BroadcastChannelUpdate { ref msg, .. 
} = e { + Some(msg.clone()) + } else { + None + } + }) + .expect("Expected BroadcastChannelUpdate for public channel"); + // message_flags should be 1 (only must_be_one bit set, dont_forward NOT set) + assert_eq!( + public_channel_update.contents.message_flags & (1 << 1), + 0, + "Public channel update should NOT have dont_forward bit set" + ); + assert_eq!( + public_channel_update.contents.message_flags & 1, + 1, + "Public channel update should have must_be_one bit set" + ); + + // Trigger channel_update by changing config on the private channel + let mut new_config = private_channel.config.unwrap(); + new_config.forwarding_fee_base_msat += 10; + nodes[1] + .node + .update_channel_config(&node_c_id, &[private_channel.channel_id], &new_config) + .unwrap(); + + // Get the channel_update for the private channel and verify dont_forward IS set + let private_channel_update = + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_c_id); + // message_flags should have dont_forward bit set + assert_ne!( + private_channel_update.contents.message_flags & (1 << 1), + 0, + "Private channel update should have dont_forward bit set" + ); + assert_eq!( + private_channel_update.contents.message_flags & 1, + 1, + "Private channel update should have must_be_one bit set" + ); +} + +#[test] +fn test_unknown_channel_update_with_dont_forward_logs_debug() { + use bitcoin::constants::ChainHash; + use bitcoin::secp256k1::ecdsa::Signature; + use bitcoin::secp256k1::ffi::Signature as FFISignature; + use bitcoin::Network; + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let unknown_scid = 42; + let msg = msgs::ChannelUpdate { + signature: Signature::from(unsafe { FFISignature::new() }), + contents: msgs::UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + 
short_channel_id: unknown_scid, + timestamp: 0, + message_flags: 1 | (1 << 1), // must_be_one + dont_forward + channel_flags: 0, + cltv_expiry_delta: 0, + htlc_minimum_msat: 0, + htlc_maximum_msat: msgs::MAX_VALUE_MSAT, + fee_base_msat: 0, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }, + }; + + nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &msg); + nodes[0].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Received channel_update for unknown channel", + 1, + ); +} diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index c06e5174263..98249f7fabd 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -9027,7 +9027,7 @@ mod tests { assert_eq!(route.paths.len(), 1); assert_eq!(route.get_total_amount(), amt_msat); assert_eq!(route.paths[0].hops.len(), 2); - assert_eq!(route.paths[0].hops[0].short_channel_id, 1); + assert_eq!(route.paths[0].hops[0].short_channel_id, 44); assert_eq!(route.paths[0].hops[1].short_channel_id, 45); assert_eq!(route.get_total_fees(), 123); }