diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 4e8d330089d..7830792b121 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_3 = { package = "lightning", git = "https://github.com/valentinewallace/rust-lightning", branch = "2026-02-dedup-htlc-fwd-data", features = ["_test_utils"] } lightning_0_2 = { package = "lightning", version = "0.2.0", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } diff --git a/lightning-tests/src/lib.rs b/lightning-tests/src/lib.rs index c028193d692..4249c957dde 100644 --- a/lightning-tests/src/lib.rs +++ b/lightning-tests/src/lib.rs @@ -1,4 +1,3 @@ -#[cfg_attr(test, macro_use)] extern crate lightning; #[cfg(all(test, not(taproot)))] diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 14b0a5c5822..2c9720f47e1 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -45,19 +45,46 @@ use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _; use lightning_0_0_125::routing::router as router_0_0_125; use lightning_0_0_125::util::ser::Writeable as _; +use lightning_0_3::events::bump_transaction::sync::WalletSourceSync as _; +use lightning_0_3::events::{ + Event as Event_0_3, HTLCHandlingFailureType as HTLCHandlingFailureType_0_3, +}; +use lightning_0_3::expect_payment_claimable as expect_payment_claimable_0_3; +use lightning_0_3::get_event_msg as get_event_msg_0_3; +use lightning_0_3::get_monitor as get_monitor_0_3; +use lightning_0_3::ln::channelmanager::PaymentId as PaymentId_0_3; +use lightning_0_3::ln::functional_test_utils as lightning_0_3_utils; +use lightning_0_3::ln::functional_test_utils::{ + PaymentFailedConditions as PaymentFailedConditions_0_3, ReconnectArgs as ReconnectArgs_0_3, + SendEvent as SendEvent_0_3, +}; +use lightning_0_3::ln::funding::SpliceContribution as SpliceContribution_0_3; +use lightning_0_3::ln::msgs::BaseMessageHandler as _; +use lightning_0_3::ln::msgs::ChannelMessageHandler as _; +use lightning_0_3::ln::msgs::MessageSendEvent as MessageSendEvent_0_3; +use lightning_0_3::ln::outbound_payment::RecipientOnionFields as RecipientOnionFields_0_3; +use lightning_0_3::ln::splicing_tests::lock_splice as lock_splice_0_3; +use lightning_0_3::ln::splicing_tests::splice_channel as splice_channel_0_3; +use lightning_0_3::ln::types::ChannelId as ChannelId_0_3; +use lightning_0_3::reload_node as reload_node_0_3; +use lightning_0_3::routing::router as router_0_3; +use lightning_0_3::types::payment::{ + PaymentHash as PaymentHash_0_3, PaymentPreimage as PaymentPreimage_0_3, + PaymentSecret as PaymentSecret_0_3, +}; +use lightning_0_3::util::ser::Writeable as _; + use lightning::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER}; -use lightning::events::bump_transaction::sync::WalletSourceSync; -use lightning::events::{ClosureReason, Event, HTLCHandlingFailureType}; -use lightning::ln::functional_test_utils::*; -use lightning::ln::funding::SpliceContribution; -use lightning::ln::msgs::BaseMessageHandler as _; 
+use lightning::check_spends; +use lightning::events::{ClosureReason, Event}; +use lightning::expect_payment_claimable; +use lightning::ln::functional_test_utils as lightning_local_utils; +use lightning::ln::functional_test_utils::{ReconnectArgs, SendEvent}; use lightning::ln::msgs::ChannelMessageHandler as _; -use lightning::ln::msgs::MessageSendEvent; -use lightning::ln::splicing_tests::*; -use lightning::ln::types::ChannelId; +use lightning::reload_node; use lightning::sign::OutputSpender; - -use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::util::ser::Writeable as _; use bitcoin::script::Builder; use bitcoin::secp256k1::Secp256k1; @@ -68,7 +95,7 @@ use std::sync::Arc; #[test] fn simple_upgrade() { // Tests a simple case of upgrading from LDK 0.1 with a pending payment - let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage); + let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage_bytes); { let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(2); let node_cfgs = lightning_0_1_utils::create_node_cfgs(2, &chanmon_cfgs); @@ -79,7 +106,7 @@ fn simple_upgrade() { let payment_preimage = lightning_0_1_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - preimage = PaymentPreimage(payment_preimage.0 .0); + preimage_bytes = payment_preimage.0 .0; node_a_ser = nodes[0].node.encode(); node_b_ser = nodes[1].node.encode(); @@ -89,26 +116,43 @@ fn simple_upgrade() { // Create a dummy node to reload over with the 0.1 state - let mut chanmon_cfgs = create_chanmon_cfgs(2); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(2); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(2, &chanmon_cfgs); let (persister_a, persister_b, chain_mon_a, chain_mon_b); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = lightning_0_3_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); let (node_a, node_b); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(2, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); - reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); - - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - - claim_payment(&nodes[0], &[&nodes[1]], preimage); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + reload_node_0_3!( + nodes[1], + config, + &node_b_ser, + &[&mon_b_ser], + persister_b, + chain_mon_b, + node_b + ); + + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + + let preimage = PaymentPreimage_0_3(preimage_bytes); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1]], preimage); } #[test] @@ -228,7 +272,7 @@ fn test_125_dangling_post_update_actions() { // Create a dummy node to reload over with the 0.0.125 state - let mut chanmon_cfgs = 
create_chanmon_cfgs(4); + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(4); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; @@ -236,14 +280,15 @@ fn test_125_dangling_post_update_actions() { chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[3].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_cfgs = lightning_local_utils::create_node_cfgs(4, &chanmon_cfgs); let (persister, chain_mon); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let node; - let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_local_utils::create_network(4, &node_cfgs, &node_chanmgrs); // Finally, reload the node in the latest LDK. This previously failed. - let config = test_default_channel_config(); + let config = lightning_local_utils::test_default_channel_config(); reload_node!(nodes[3], config, &node_d_ser, &[&mon_ser], persister, chain_mon, node); } @@ -283,14 +328,14 @@ fn test_0_1_legacy_remote_key_derivation() { } // Create a dummy node to reload over with the 0.1 state - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(2); + let node_cfgs = lightning_local_utils::create_node_cfgs(2, &chanmon_cfgs); let (persister_a, persister_b, chain_mon_a, chain_mon_b); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = lightning_local_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); let (node_a, node_b); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_local_utils::create_network(2, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_local_utils::test_default_channel_config(); let a_mons = &[&mon_a_ser[..]]; reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); @@ -299,13 +344,13 @@ fn test_0_1_legacy_remote_key_derivation() { let node_b_id = nodes[1].node.get_our_node_id(); - mine_transaction(&nodes[0], &commitment_tx[0]); + lightning_local_utils::mine_transaction(&nodes[0], &commitment_tx[0]); let reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); - check_added_monitors(&nodes[0], 1); - check_closed_broadcast(&nodes[0], 1, false); + lightning_local_utils::check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); + lightning_local_utils::check_added_monitors(&nodes[0], 1); + lightning_local_utils::check_closed_broadcast(&nodes[0], 1, false); - connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + lightning_local_utils::connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let mut spendable_event = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(spendable_event.len(), 1); if let Event::SpendableOutputs { outputs, channel_id: ev_id, counterparty_node_id: _ } = @@ -422,7 +467,7 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { } // Create a dummy node to reload over with the 0.1 state - let mut chanmon_cfgs = 
create_chanmon_cfgs(3); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; @@ -433,73 +478,105 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { chanmon_cfgs[1].tx_broadcaster.blocks = node_b_blocks; chanmon_cfgs[2].tx_broadcaster.blocks = node_c_blocks; - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let (node_a, node_b, node_c); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_1_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; - reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); let c_mons = &[&mon_c_1_ser[..]]; - reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); reconnect_b_c_args.send_channel_ready = (true, true); reconnect_b_c_args.send_announcement_sigs = (true, true); - reconnect_nodes(reconnect_b_c_args); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); - let contribution = SpliceContribution::splice_out(vec![TxOut { + let contribution = SpliceContribution_0_3::splice_out(vec![TxOut { value: Amount::from_sat(1_000), script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), }]); - let splice_tx = splice_channel(&nodes[0], &nodes[1], ChannelId(chan_id_bytes_a), contribution); + let splice_tx = + splice_channel_0_3(&nodes[0], &nodes[1], ChannelId_0_3(chan_id_bytes_a), contribution); for node in nodes.iter() { - mine_transaction(node, &splice_tx); - connect_blocks(node, ANTI_REORG_DELAY - 1); + lightning_0_3_utils::mine_transaction(node, &splice_tx); + lightning_0_3_utils::connect_blocks(node, ANTI_REORG_DELAY - 1); } - let splice_locked = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_b_id); - lock_splice(&nodes[0], &nodes[1], &splice_locked, false); + let splice_locked = + get_event_msg_0_3!(nodes[0], MessageSendEvent_0_3::SendSpliceLocked, node_b_id); + lock_splice_0_3(&nodes[0], &nodes[1], &splice_locked, false); for node in nodes.iter() { - connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL - ANTI_REORG_DELAY); + lightning_0_3_utils::connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL 
- ANTI_REORG_DELAY); } // Now release the HTLC to be failed back to node A nodes[1].node.process_pending_htlc_forwards(); - let pay_secret = PaymentSecret(payment_secret_bytes); - let pay_hash = PaymentHash(payment_hash_bytes); - let pay_preimage = PaymentPreimage(payment_preimage_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); if fail_htlc { - let failure = HTLCHandlingFailureType::Forward { + let failure = HTLCHandlingFailureType_0_3::Forward { node_id: Some(node_c_id), - channel_id: ChannelId(chan_id_bytes_b), + channel_id: ChannelId_0_3(chan_id_bytes_b), }; - expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[failure]); - check_added_monitors(&nodes[1], 1); + lightning_0_3_utils::expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], + &[failure], + ); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + let updates = lightning_0_3_utils::get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); - let conditions = PaymentFailedConditions::new(); - expect_payment_failed_conditions(&nodes[0], pay_hash, false, conditions); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[0], + &nodes[1], + &updates.commitment_signed, + false, + false, + ); + let conditions = PaymentFailedConditions_0_3::new(); + lightning_0_3_utils::expect_payment_failed_conditions( + &nodes[0], pay_hash, false, conditions, + ); } else { - check_added_monitors(&nodes[1], 1); - let forward_event = SendEvent::from_node(&nodes[1]); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); let commitment = &forward_event.commitment_msg; - do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); - expect_and_process_pending_htlcs(&nodes[2], false); - expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } } @@ -634,32 +711,49 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { } // Create a dummy node to reload over with the 0.2 state - let mut chanmon_cfgs = create_chanmon_cfgs(3); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + 
let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let (node_a, node_b, node_c); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_1_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; - reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); let c_mons = &[&mon_c_1_ser[..]]; - reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); reconnect_b_c_args.send_channel_ready = (true, true); reconnect_b_c_args.send_announcement_sigs = (true, true); - reconnect_nodes(reconnect_b_c_args); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); // Now release the HTLC from node_b to node_c, to be claimed back to node_a nodes[1].node.process_pending_htlc_forwards(); @@ -668,7 +762,7 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); let (intercept_id, expected_outbound_amt_msat) = match events[0] { - Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { + Event_0_3::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { (intercept_id, expected_outbound_amount_msat) }, _ => panic!(), @@ -677,7 +771,7 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { .node .forward_intercepted_htlc( intercept_id, - &ChannelId(chan_id_bytes_b_c), + &ChannelId_0_3(chan_id_bytes_b_c), nodes[2].node.get_our_node_id(), expected_outbound_amt_msat, ) @@ -685,17 +779,647 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { nodes[1].node.process_pending_htlc_forwards(); } + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} + +#[test] +fn test_0_3_pending_forward_upgrade() { + // Tests upgrading from 0.3 with a pending HTLC forward. 
+ // Phase 1: Create state in 0.3 with a pending forward (HTLC locked on inbound edge of node B) + // Phase 2: Reload with local lightning and complete the payment + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, _node_c_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + + { + let chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + _node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_3_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_3_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_3_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_3_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_3::PaymentParameters::from_node_id( + _node_c_id, + lightning_0_3_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_3::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_3_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_3::secret_only(secret); + let id = PaymentId_0_3(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_3_utils::check_added_monitors(&nodes[0], 1); + let send_event = SendEvent_0_3::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[1], + &nodes[0], + &send_event.commitment_msg, + false, + false, + ); + // Process the pending update_add_htlcs but don't forward yet + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(events.is_empty()); + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_3!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_3!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_3!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_3!(nodes[2], chan_id_b).encode(); + } + + // Create a dummy node to reload over with the 0.3 state + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_local_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_local_utils::reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + let pay_secret = PaymentSecret(payment_secret_bytes); let pay_hash = PaymentHash(payment_hash_bytes); let pay_preimage = PaymentPreimage(payment_preimage_bytes); - check_added_monitors(&nodes[1], 1); + lightning_local_utils::check_added_monitors(&nodes[1], 1); let forward_event = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); let commitment = &forward_event.commitment_msg; - do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + lightning_local_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); - expect_and_process_pending_htlcs(&nodes[2], false); + lightning_local_utils::expect_and_process_pending_htlcs(&nodes[2], false); expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + lightning_local_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); 
+} + +#[test] +fn test_0_2_pending_forward_upgrade_fails() { + // Tests that upgrading directly from 0.2 to local lightning fails with DecodeError::InvalidValue + // when there's a pending HTLC forward, because in 0.5 we started requiring new pending_htlc + // fields that started being written in 0.3. + // XXX update this + use lightning::ln::channelmanager::ChannelManagerReadArgs; + use lightning::ln::msgs::DecodeError; + use lightning::util::ser::ReadableArgs; + + let (node_b_ser, mon_b_1_ser, mon_b_2_ser); + + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + + // Send HTLC from node_a to node_c, hold at node_b (don't call process_pending_htlc_forwards) + let node_a_id = nodes[0].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + + // Process the pending HTLC to create a pending forward (but don't actually forward it) + nodes[1].node.test_process_pending_update_add_htlcs(); + + // Serialize node_b with the pending forward + node_b_ser = nodes[1].node.encode(); + mon_b_1_ser = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + } + + // Try to reload using local lightning - this should fail with DecodeError::InvalidValue + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(1); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(1, &chanmon_cfgs); + let node_chanmgrs = lightning_local_utils::create_node_chanmgrs(1, &node_cfgs, &[None]); + let nodes = lightning_local_utils::create_network(1, &node_cfgs, &node_chanmgrs); + + // Read the monitors first + use lightning::util::test_channel_signer::TestChannelSigner; + let mut monitor_read_1 = &mon_b_1_ser[..]; + let (_, monitor_1) = <( + bitcoin::BlockHash, + lightning::chain::channelmonitor::ChannelMonitor, + )>::read( + &mut monitor_read_1, (nodes[0].keys_manager, nodes[0].keys_manager) + ) + .unwrap(); + + let mut monitor_read_2 = &mon_b_2_ser[..]; + let (_, monitor_2) = <( + bitcoin::BlockHash, + lightning::chain::channelmonitor::ChannelMonitor, + )>::read( + &mut monitor_read_2, (nodes[0].keys_manager, nodes[0].keys_manager) + ) + .unwrap(); + + // Try to read the ChannelManager - this should fail + use lightning::util::hash_tables::new_hash_map; + let mut channel_monitors = new_hash_map(); + channel_monitors.insert(monitor_1.channel_id(), &monitor_1); + channel_monitors.insert(monitor_2.channel_id(), &monitor_2); + + let config = lightning_local_utils::test_default_channel_config(); + let mut node_read = &node_b_ser[..]; + let read_args = ChannelManagerReadArgs { + config, + entropy_source: nodes[0].keys_manager, + node_signer: nodes[0].keys_manager, + signer_provider: nodes[0].keys_manager, + fee_estimator: nodes[0].fee_estimator, + router: nodes[0].router, + message_router: nodes[0].message_router, + chain_monitor: nodes[0].chain_monitor, + tx_broadcaster: nodes[0].tx_broadcaster, + logger: nodes[0].logger, + channel_monitors, + }; + + let result = + <(bitcoin::BlockHash, lightning::ln::functional_test_utils::TestChannelManager)>::read( + &mut node_read, + read_args, + ); + + assert!(matches!(result, Err(DecodeError::InvalidValue))); +} + +#[test] +fn test_0_2_to_0_3_to_local_pending_forward_upgrade() { + // Tests that upgrading from 0.2 to 0.3 to local with a pending HTLC forward works. + // The key is that 0.3 can read 0.2's format and re-serialize it in the new format + // that local can then read. 
+ let (node_a_ser_0_3, node_b_ser_0_3, node_c_ser_0_3); + let (mon_a_1_ser_0_3, mon_b_1_ser_0_3, mon_b_2_ser_0_3, mon_c_1_ser_0_3); + let (node_a_id, node_b_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (chan_id_a_bytes, chan_id_b_bytes); + + // Phase 1: Create network on 0.2 with pending HTLC forward + let (node_a_ser_0_2, node_b_ser_0_2, node_c_ser_0_2); + let (mon_a_1_ser_0_2, mon_b_1_ser_0_2, mon_b_2_ser_0_2, mon_c_1_ser_0_2); + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_a_bytes = chan_id_a.0; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_b_bytes = chan_id_b.0; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_2_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + // Process the pending update_add_htlcs to create pending forward state + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(events.is_empty()); + + node_a_ser_0_2 = nodes[0].node.encode(); + node_b_ser_0_2 = nodes[1].node.encode(); + node_c_ser_0_2 = nodes[2].node.encode(); + mon_a_1_ser_0_2 = get_monitor_0_2!(nodes[0], chan_id_a).encode(); + mon_b_1_ser_0_2 = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser_0_2 = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + mon_c_1_ser_0_2 = get_monitor_0_2!(nodes[2], chan_id_b).encode(); + } + + // Phase 2: Reload on 0.3, forward the HTLC (but don't claim), and re-serialize + { + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_0_3_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser_0_2[..]]; + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser_0_2, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser_0_2[..], &mon_b_2_ser_0_2[..]]; + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser_0_2, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser_0_2[..]]; + reload_node_0_3!( + nodes[2], + config, + &node_c_ser_0_2, + c_mons, + persister_c, + chain_mon_c, + node_c + ); + + // Reconnect nodes after reload + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); + + // Forward the HTLC from node_b to node_c (but don't claim yet) + nodes[1].node.process_pending_htlc_forwards(); + + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + + // Re-serialize in 0.3 format - now the HTLC has been forwarded (not just pending) + node_a_ser_0_3 = nodes[0].node.encode(); + node_b_ser_0_3 = nodes[1].node.encode(); + node_c_ser_0_3 = nodes[2].node.encode(); + + let chan_id_a = ChannelId_0_3(chan_id_a_bytes); + let chan_id_b = ChannelId_0_3(chan_id_b_bytes); + + 
mon_a_1_ser_0_3 = get_monitor_0_3!(nodes[0], chan_id_a).encode(); + mon_b_1_ser_0_3 = get_monitor_0_3!(nodes[1], chan_id_a).encode(); + mon_b_2_ser_0_3 = get_monitor_0_3!(nodes[1], chan_id_b).encode(); + mon_c_1_ser_0_3 = get_monitor_0_3!(nodes[2], chan_id_b).encode(); + } + + // Phase 3: Reload on local and claim the payment (HTLC already forwarded in Phase 2) + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_local_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser_0_3[..]]; + reload_node!( + nodes[0], + config.clone(), + &node_a_ser_0_3, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser_0_3[..], &mon_b_2_ser_0_3[..]]; + reload_node!( + nodes[1], + config.clone(), + &node_b_ser_0_3, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser_0_3[..]]; + reload_node!(nodes[2], config, &node_c_ser_0_3, c_mons, persister_c, chain_mon_c, node_c); + + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); + + // The HTLC was already forwarded and claimable event generated in Phase 2. + // After reload, just claim the payment - the HTLC is already locked on node C. + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + lightning_local_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} + +#[test] +fn test_local_to_0_3_pending_forward_downgrade() { + // Tests downgrading from local lightning to 0.3 with a pending HTLC forward works. 
+ // Phase 1: Create state in local lightning with a pending forward + // Phase 2: Reload with 0.3 and complete the payment + use lightning::ln::types::ChannelId; + + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (chan_id_a_bytes, chan_id_b_bytes); + + // Phase 1: Create network on local lightning with pending HTLC forward + { + let chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_id_a = lightning_local_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_a_bytes = chan_id_a.0; + + let chan_id_b = lightning_local_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_b_bytes = chan_id_b.0; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_local_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_local_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + use lightning::routing::router as local_router; + let pay_params = local_router::PaymentParameters::from_node_id( + node_c_id, + lightning_local_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + local_router::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_local_utils::get_route(&nodes[0], &route_params).unwrap(); + + use lightning::ln::channelmanager::PaymentId as LocalPaymentId; + use lightning::ln::outbound_payment::RecipientOnionFields as LocalRecipientOnionFields; + let onion = LocalRecipientOnionFields::secret_only(secret); + let id = LocalPaymentId(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_local_utils::check_added_monitors(&nodes[0], 1); + let send_event = SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + lightning_local_utils::do_commitment_signed_dance( + &nodes[1], + &nodes[0], + &send_event.commitment_msg, + false, + false, + ); + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = lightning::get_monitor!(nodes[0], ChannelId(chan_id_a_bytes)).encode(); + mon_b_1_ser = lightning::get_monitor!(nodes[1], ChannelId(chan_id_a_bytes)).encode(); + mon_b_2_ser = lightning::get_monitor!(nodes[1], ChannelId(chan_id_b_bytes)).encode(); + mon_c_1_ser = lightning::get_monitor!(nodes[2], ChannelId(chan_id_b_bytes)).encode(); + } + + // Phase 2: Reload on 0.3 and complete the payment + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_0_3_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 5a0c37bd61d..b421114e911 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3519,8 +3519,9 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode .node 
.handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed); check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + let (a, raa, holding_cell) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); + assert!(holding_cell.is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors(&nodes[1], 1); @@ -3938,7 +3939,12 @@ fn do_test_durable_preimages_on_closed_channel( let evs = nodes[1].node.get_and_clear_pending_events(); assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 }); for ev in evs { - if let Event::PaymentForwarded { .. } = ev { + if let Event::PaymentForwarded { claim_from_onchain_tx, next_user_channel_id, .. } = ev { + if !claim_from_onchain_tx { + // If the outbound channel is still open, the `next_user_channel_id` should be available. + // This was previously broken. + assert!(next_user_channel_id.is_some()) + } } else { panic!(); } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3236ebdefed..ccceef01796 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,9 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, - HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCPreviousHopData, + HTLCSource, OpenChannelMessage, PaymentClaimDetails, RAACommitmentOrder, SentHTLCId, + BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -145,36 +144,11 @@ enum InboundHTLCRemovalReason { Fulfill { preimage: PaymentPreimage, attribution_data: Option }, } -/// Represents the resolution status of an inbound HTLC. -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] -enum InboundHTLCResolution { - /// Resolved implies the action we must take with the inbound HTLC has already been determined, - /// i.e., we already know whether it must be failed back or forwarded. - // - // TODO: Once this variant is removed, we should also clean up - // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable. - Resolved { pending_htlc_status: PendingHTLCStatus }, - /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed - /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both - /// nodes for the state update in which it was proposed. - Pending { update_add_htlc: msgs::UpdateAddHTLC }, -} - -impl_writeable_tlv_based_enum!(InboundHTLCResolution, - (0, Resolved) => { - (0, pending_htlc_status, required), - }, - (2, Pending) => { - (0, update_add_htlc, required), - }, -); - #[cfg_attr(test, derive(Debug))] enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. - RemoteAnnounced(InboundHTLCResolution), + RemoteAnnounced(msgs::UpdateAddHTLC), /// Included in a received commitment_signed message (implying we've /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous /// state (see the example below). 
We have not yet included this HTLC in a @@ -204,13 +178,13 @@ enum InboundHTLCState { /// Implies AwaitingRemoteRevoke. /// /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md - AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution), + AwaitingRemoteRevokeToAnnounce(msgs::UpdateAddHTLC), /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it). /// We have also included this HTLC in our latest commitment_signed and are now just waiting /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. - AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), + AwaitingAnnouncedRemoteRevoke(msgs::UpdateAddHTLC), /// An HTLC irrevocably committed in the latest commitment transaction, ready to be forwarded or /// removed. Committed { @@ -295,19 +269,42 @@ impl InboundHTLCState { /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc fn should_hold_htlc(&self) -> bool { match self { - InboundHTLCState::RemoteAnnounced(res) - | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res) - | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res { - InboundHTLCResolution::Pending { update_add_htlc } => { - update_add_htlc.hold_htlc.is_some() - }, - InboundHTLCResolution::Resolved { .. } => false, + InboundHTLCState::RemoteAnnounced(update_add_htlc) + | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc) + | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) => { + update_add_htlc.hold_htlc.is_some() }, InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false, } } } +/// Information about the outbound hop for a forwarded HTLC. Useful for generating an accurate +/// [`Event::PaymentForwarded`] if we need to claim this HTLC post-restart. +/// +/// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded +#[derive(Debug, Copy, Clone)] +pub(super) struct OutboundHop { + /// The amount forwarded outbound. + pub(super) amt_msat: u64, + /// The outbound channel this HTLC was forwarded over. + pub(super) channel_id: ChannelId, + /// The next-hop recipient of this HTLC. + pub(super) node_id: PublicKey, + /// The outbound channel's funding outpoint. + pub(super) funding_txo: OutPoint, + /// The outbound channel's user channel ID. + pub(super) user_channel_id: u128, +} + +impl_writeable_tlv_based!(OutboundHop, { + (0, amt_msat, required), + (2, channel_id, required), + (4, node_id, required), + (6, funding_txo, required), + (8, user_channel_id, required), +}); + /// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. If /// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to /// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every @@ -315,34 +312,34 @@ impl InboundHTLCState { /// /// Useful for reconstructing the pending HTLC set on startup. #[derive(Debug, Clone)] -pub(super) enum InboundUpdateAdd { +enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing /// its onion to be pruned and no longer persisted. + /// + /// Contains data that is useful if we need to fail or claim this HTLC backwards after a restart + /// and it's missing in the outbound edge. 
Forwarded { - /// Useful if we need to fail or claim this HTLC backwards after restart, if it's missing in the - /// outbound edge. - hop_data: HTLCPreviousHopData, - /// Useful if we need to claim this HTLC backwards after a restart and it's missing in the - /// outbound edge, to generate an accurate [`Event::PaymentForwarded`]. - /// - /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded - outbound_amt_msat: u64, + incoming_packet_shared_secret: [u8; 32], + phantom_shared_secret: Option<[u8; 32]>, + trampoline_shared_secret: Option<[u8; 32]>, + blinded_failure: Option, + outbound_hop: OutboundHop, }, - /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound - /// committed HTLCs. - Legacy, } impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, (0, WithOnion) => { (0, update_add_htlc, required), }, - (2, Legacy) => {}, + // 2 was previously used for the ::Legacy variant, used for HTLCs received on LDK 0.1-. (4, Forwarded) => { - (0, hop_data, required), - (2, outbound_amt_msat, required), + (0, incoming_packet_shared_secret, required), + (2, outbound_hop, required), + (4, phantom_shared_secret, option), + (6, trampoline_shared_secret, option), + (8, blinded_failure, option), }, ); @@ -1182,7 +1179,6 @@ pub(super) struct MonitorRestoreUpdates { /// A `CommitmentUpdate` to be sent to our channel peer. pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, - pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, /// Inbound update_adds that are now irrevocably committed to this channel and are ready for the @@ -3092,7 +3088,6 @@ pub(super) struct ChannelContext { // responsible for some of the HTLCs here or not - we don't know whether the update in question // completed or not. We currently ignore these fields entirely when force-closing a channel, // but need to handle this somehow or we run the risk of losing HTLCs! - monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, monitor_pending_finalized_fulfills: Vec<(HTLCSource, Option)>, monitor_pending_update_adds: Vec, @@ -3796,7 +3791,6 @@ impl ChannelContext { monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), monitor_pending_update_adds: Vec::new(), @@ -4030,7 +4024,6 @@ impl ChannelContext { monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), monitor_pending_update_adds: Vec::new(), @@ -7618,7 +7611,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } @@ -7878,17 +7870,29 @@ where amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending { - update_add_htlc: msg.clone(), - }), + state: InboundHTLCState::RemoteAnnounced(msg.clone()) }); Ok(()) } - /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. 
- pub(super) fn inbound_committed_unresolved_htlcs( + /// Returns committed inbound HTLCs whose onion has not yet been decoded and processed. Useful + /// for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_htlcs_pending_decode( + &self, + ) -> impl Iterator + '_ { + self.context.pending_inbound_htlcs.iter().filter_map(|htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + } => Some(update_add_htlc.clone()), + _ => None, + }) + } + + /// Returns committed inbound HTLCs that have been forwarded but not yet fully resolved. Useful + /// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_forwarded_htlcs( &self, - ) -> Vec<(PaymentHash, InboundUpdateAdd)> { + ) -> impl Iterator + '_ { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell. let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7902,19 +7906,45 @@ where }) }; - self.context - .pending_inbound_htlcs - .iter() - .filter_map(|htlc| match &htlc.state { - InboundHTLCState::Committed { update_add_htlc } => { - if htlc_resolution_in_holding_cell(htlc.htlc_id) { - return None; - } - Some((htlc.payment_hash, update_add_htlc.clone())) - }, - _ => None, - }) - .collect() + let prev_outbound_scid_alias = self.context.outbound_scid_alias(); + let user_channel_id = self.context.get_user_id(); + let channel_id = self.context.channel_id(); + let outpoint = self.funding_outpoint(); + let counterparty_node_id = self.context.get_counterparty_node_id(); + + self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: + InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + outbound_hop, + }, + } => { + if htlc_resolution_in_holding_cell(htlc.htlc_id) { + return None; + } + // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards + // post-restart, if it is missing in the outbound edge. + let prev_hop_data = HTLCPreviousHopData { + prev_outbound_scid_alias, + user_channel_id: Some(user_channel_id), + htlc_id: htlc.htlc_id, + incoming_packet_shared_secret: *incoming_packet_shared_secret, + phantom_shared_secret: *phantom_shared_secret, + trampoline_shared_secret: *trampoline_shared_secret, + blinded_failure: *blinded_failure, + channel_id, + outpoint, + counterparty_node_id: Some(counterparty_node_id), + cltv_expiry: Some(htlc.cltv_expiry), + }; + Some((htlc.payment_hash, prev_hop_data, *outbound_hop)) + }, + _ => None, + }) } /// Useful when reconstructing the set of pending HTLC forwards when deserializing the @@ -7961,12 +7991,19 @@ where /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to /// persist its onion. 
pub(super) fn prune_inbound_htlc_onion( - &mut self, htlc_id: u64, hop_data: HTLCPreviousHopData, outbound_amt_msat: u64, + &mut self, htlc_id: u64, prev_hop_data: &HTLCPreviousHopData, + outbound_hop_data: OutboundHop, ) { for htlc in self.context.pending_inbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { - *update_add_htlc = InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }; + *update_add_htlc = InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret: prev_hop_data.incoming_packet_shared_secret, + phantom_shared_secret: prev_hop_data.phantom_shared_secret, + trampoline_shared_secret: prev_hop_data.trampoline_shared_secret, + blinded_failure: prev_hop_data.blinded_failure, + outbound_hop: outbound_hop_data, + }; return; } } @@ -8111,15 +8148,7 @@ where &self.context.channel_id() ); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context .interactive_tx_signing_session .as_mut() @@ -8223,15 +8252,7 @@ where .as_mut() .expect("Signing session must exist for negotiated pending splice") .received_commitment_signed(); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context.monitor_pending_tx_signatures = true; Ok(self.push_ret_blockable_mon_update(monitor_update)) @@ -8426,11 +8447,10 @@ where } for htlc in self.context.pending_inbound_htlcs.iter_mut() { - if let &InboundHTLCState::RemoteAnnounced(ref htlc_resolution) = &htlc.state { + if let &InboundHTLCState::RemoteAnnounced(ref update_add) = &htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); - htlc.state = - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone()); + htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add.clone()); need_commitment = true; } } @@ -8532,7 +8552,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); @@ -8732,15 +8751,7 @@ where if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); - self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8871,12 +8882,9 @@ where } log_trace!(logger, "Updating HTLCs on receipt of RAA..."); - let mut to_forward_infos = Vec::new(); let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); - let mut update_fail_htlcs = Vec::new(); - let mut update_fail_malformed_htlcs = Vec::new(); let mut static_invoices = Vec::new(); let mut require_commitment = false; let mut value_to_self_msat_diff: i64 = 0; @@ -8941,63 +8949,28 @@ where }; if swap { let mut state = - InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }; + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed { + sha256_of_onion: [0; 32], + failure_code: 0, + }); mem::swap(&mut state, &mut 
htlc.state); - if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { + if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add) = state { log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash); - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution); + htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add); require_commitment = true; - } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = + } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) = state { - match resolution { - InboundHTLCResolution::Resolved { pending_htlc_status } => { - match pending_htlc_status { - PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailRelay( - msg.clone().into(), - ), - ); - update_fail_htlcs.push(msg) - }, - HTLCFailureMsg::Malformed(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailMalformed { - sha256_of_onion: msg.sha256_of_onion, - failure_code: msg.failure_code, - }, - ); - update_fail_malformed_htlcs.push(msg) - }, - } - }, - PendingHTLCStatus::Forward(forward_info) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); - to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed { - // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were - // received on LDK 0.1-. 
- update_add_htlc: InboundUpdateAdd::Legacy, - }; - }, - } - }, - InboundHTLCResolution::Pending { update_add_htlc } => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - pending_update_adds.push(update_add_htlc.clone()); - htlc.state = InboundHTLCState::Committed { - update_add_htlc: InboundUpdateAdd::WithOnion { - update_add_htlc, - }, - }; - }, - } + log_trace!( + logger, + " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", + &htlc.payment_hash + ); + pending_update_adds.push(update_add_htlc.clone()); + htlc.state = InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + }; } } } @@ -9113,7 +9086,6 @@ where false, true, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9140,9 +9112,11 @@ where release_state_str ); if self.context.channel_state.can_generate_new_commitment() { - log_debug!(logger, "Responding with a commitment update with {} HTLCs failed for channel {}", - update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), - &self.context.channel_id); + log_debug!( + logger, + "Responding with a commitment update for channel {}", + &self.context.channel_id + ); } else { let reason = if self.context.channel_state.is_local_stfu_sent() { "exits quiescence" @@ -9158,7 +9132,6 @@ where false, true, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9172,7 +9145,6 @@ where false, false, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9482,7 +9454,6 @@ where /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L, ) { @@ -9491,7 +9462,6 @@ where self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; self.context.monitor_pending_channel_ready |= resend_channel_ready; - self.context.monitor_pending_forwards.extend(pending_forwards); self.context.monitor_pending_failures.extend(pending_fails); self.context.monitor_pending_finalized_fulfills.extend(pending_finalized_claimed_htlcs); self.context.channel_state.set_monitor_update_in_progress(); @@ -9578,8 +9548,6 @@ where let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); - let mut accepted_htlcs = Vec::new(); - mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); let mut failed_htlcs = Vec::new(); mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); let mut finalized_claimed_htlcs = Vec::new(); @@ -9600,9 +9568,9 @@ where self.context.monitor_pending_commitment_signed = false; return MonitorRestoreUpdates { raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, - funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, committed_outbound_htlc_sources + failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, + channel_ready, announcement_sigs, tx_signatures: None, channel_ready_order, + committed_outbound_htlc_sources }; } @@ -9631,7 
+9599,7 @@ where if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { - raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, + raa, commitment_update, commitment_order, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, channel_ready_order, committed_outbound_htlc_sources } @@ -10707,15 +10675,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -11456,15 +11416,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); let announcement_sigs = @@ -12909,10 +12861,10 @@ where // is acceptable. for htlc in self.context.pending_inbound_htlcs.iter_mut() { let new_state = - if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = + if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add) = &htlc.state { - Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) + Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add.clone())) } else { None }; @@ -13133,15 +13085,7 @@ where let can_add_htlc = send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))?; if can_add_htlc { let monitor_update = self.build_commitment_no_status_check(logger); - self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); Ok(self.push_ret_blockable_mon_update(monitor_update)) } else { Ok(None) @@ -13261,15 +13205,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - &&logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), &&logger); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -13878,7 +13814,6 @@ impl OutboundV1Channel { need_channel_ready, Vec::new(), Vec::new(), - Vec::new(), logger, ); Ok((channel, channel_monitor)) @@ -14177,7 +14112,6 @@ impl InboundV1Channel { need_channel_ready, Vec::new(), Vec::new(), - Vec::new(), logger, ); @@ -14622,6 +14556,41 @@ impl Readable for AnnouncementSigsState { } } +/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here, +/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s. +enum WriteableLegacyHTLCResolution<'a> { + Pending { update_add_htlc: &'a msgs::UpdateAddHTLC }, +} + +// We can't use `impl_writeable_tlv_based_enum` due to the lifetime. 
+impl<'a> Writeable for WriteableLegacyHTLCResolution<'a> { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + match self { + Self::Pending { update_add_htlc } => { + 2u8.write(writer)?; + crate::_encode_varint_length_prefixed_tlv!(writer, { + (0, update_add_htlc, required) + }); + }, + } + + Ok(()) + } +} + +/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here, +/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s. +enum ReadableLegacyHTLCResolution { + Pending { update_add_htlc: msgs::UpdateAddHTLC }, +} + +impl_writeable_tlv_based_enum!(ReadableLegacyHTLCResolution, + // 0 used to be used for the ::Resolved variant in 0.2 and below. + (2, Pending) => { + (0, update_add_htlc, required), + }, +); + impl Writeable for FundedChannel { fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been @@ -14706,13 +14675,13 @@ impl Writeable for FundedChannel { htlc.payment_hash.write(writer)?; match &htlc.state { &InboundHTLCState::RemoteAnnounced(_) => unreachable!(), - &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => { + &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add_htlc) => { 1u8.write(writer)?; - htlc_resolution.write(writer)?; + WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?; }, - &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { + &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref update_add_htlc) => { 2u8.write(writer)?; - htlc_resolution.write(writer)?; + WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?; }, &InboundHTLCState::Committed { ref update_add_htlc } => { 3u8.write(writer)?; @@ -14888,11 +14857,8 @@ impl Writeable for FundedChannel { self.context.monitor_pending_revoke_and_ack.write(writer)?; self.context.monitor_pending_commitment_signed.write(writer)?; - (self.context.monitor_pending_forwards.len() as u64).write(writer)?; - for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() { - pending_forward.write(writer)?; - htlc_id.write(writer)?; - } + // Previously used for monitor_pending_forwards prior to LDK 0.5. 
+ 0u64.write(writer)?; (self.context.monitor_pending_failures.len() as u64).write(writer)?; for &(ref htlc_source, ref payment_hash, ref fail_reason) in @@ -15111,13 +15077,13 @@ impl Writeable for FundedChannel { } } -impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> - ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> for FundedChannel +impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> + ReadableArgs<(&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures)> for FundedChannel { fn read( - reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures), + reader: &mut R, args: (&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures), ) -> Result { - let (entropy_source, signer_provider, our_supported_features) = args; + let (entropy_source, signer_provider, logger, our_supported_features) = args; let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); if ver <= 2 { return Err(DecodeError::UnknownVersion); @@ -15152,8 +15118,9 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> let counterparty_next_commitment_transaction_number = Readable::read(reader)?; let value_to_self_msat = Readable::read(reader)?; - let pending_inbound_htlc_count: u64 = Readable::read(reader)?; + let logger = WithContext::from(logger, None, Some(channel_id), None); + let pending_inbound_htlc_count: u64 = Readable::read(reader)?; let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min( pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize, @@ -15166,26 +15133,22 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> payment_hash: Readable::read(reader)?, state: match ::read(reader)? { 1 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) + let ReadableLegacyHTLCResolution::Pending { update_add_htlc } = + Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc) }, 2 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; - InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) + let ReadableLegacyHTLCResolution::Pending { update_add_htlc } = + Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) }, - 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, + 3 => InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, 4 => { let reason = match ::read(reader)? 
{ 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15317,13 +15280,10 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> let monitor_pending_revoke_and_ack = Readable::read(reader)?; let monitor_pending_commitment_signed = Readable::read(reader)?; - let monitor_pending_forwards_count: u64 = Readable::read(reader)?; - let mut monitor_pending_forwards = Vec::with_capacity(cmp::min( - monitor_pending_forwards_count as usize, - DEFAULT_MAX_HTLCS as usize, - )); - for _ in 0..monitor_pending_forwards_count { - monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?)); + let monitor_pending_forwards_count_legacy: u64 = Readable::read(reader)?; + if monitor_pending_forwards_count_legacy != 0 { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + return Err(DecodeError::InvalidValue); } let monitor_pending_failures_count: u64 = Readable::read(reader)?; @@ -15927,7 +15887,6 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> monitor_pending_channel_ready, monitor_pending_revoke_and_ack, monitor_pending_commitment_signed, - monitor_pending_forwards, monitor_pending_failures, monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or_default(), @@ -16036,6 +15995,23 @@ pub(crate) fn hold_time_since(send_timestamp: Option) -> Option { }) } +fn dummy_inbound_update_add() -> InboundUpdateAdd { + let txid_hash = Sha256d::from_bytes_ref(&[0; 32]); + InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret: [0; 32], + phantom_shared_secret: None, + trampoline_shared_secret: None, + blinded_failure: None, + outbound_hop: OutboundHop { + amt_msat: 0, + channel_id: ChannelId([0; 32]), + funding_txo: OutPoint { txid: Txid::from_raw_hash(*txid_hash), index: 0 }, + node_id: PublicKey::from_slice(&[2; 33]).unwrap(), + user_channel_id: 0, + }, + } +} + #[cfg(test)] mod tests { use crate::chain::chaininterface::LowerBoundedFeeEstimator; @@ -16043,8 +16019,8 @@ mod tests { use crate::chain::BestBlock; use crate::ln::chan_utils::{self, commit_tx_fee_sat, ChannelTransactionParameters}; use crate::ln::channel::{ - AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, HTLCInitiator, - HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundUpdateAdd, + dummy_inbound_update_add, AwaitingChannelReadyFlags, ChannelState, FundedChannel, + HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, }; use crate::ln::channel::{ @@ -16088,10 +16064,6 @@ mod tests { use bitcoin::{ScriptBuf, WPubkeyHash, WitnessProgram, WitnessVersion}; use std::cmp; - fn dummy_inbound_update_add() -> InboundUpdateAdd { - InboundUpdateAdd::Legacy - } - #[test] #[rustfmt::skip] fn test_channel_state_order() { @@ -16867,9 +16839,11 @@ mod tests { let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); let features = channelmanager::provided_channel_type_features(&config); - let decoded_chan = - FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)) - .unwrap(); + let decoded_chan = FundedChannel::read( + &mut reader, + (&&keys_provider, &&keys_provider, &&logger, &features), + ) + .unwrap(); assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs); 
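// Editorial note (not part of the patch): `FundedChannel::read` now threads a `Logger` through
// its `ReadableArgs` tuple, as exercised in the test call above. This is what allows the
// deserializer to emit the "Found deprecated HTLC received on LDK 0.1 or earlier ... see
// CHANGELOG.md" error for legacy pre-0.3 HTLC resolution states, and for a non-zero legacy
// `monitor_pending_forwards` count, before the read fails rather than returning a bare
// `DecodeError` with no context.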
assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e840d705b8e..58b0589af15 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,7 +59,7 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundHop, OutboundV1Channel, PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, WithChannelContext, }; @@ -188,23 +188,6 @@ use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use core::time::Duration; use core::{cmp, mem}; -// We hold various information about HTLC relay in the HTLC objects in Channel itself: -// -// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should -// forward the HTLC with information it will give back to us when it does so, or if it should Fail -// the HTLC with the relevant message for the Channel to handle giving to the remote peer. -// -// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the -// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo -// with it to track where it came from (in case of onwards-forward error), waiting a random delay -// before we forward it. -// -// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a -// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use -// to either fail-backwards or fulfill the HTLC backwards along the relevant path). -// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is -// our payment, which we can use to decode errors or inform the user that the payment was sent. - /// Information about where a received HTLC('s onion) has indicated the HTLC should go. #[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(Debug, PartialEq))] @@ -437,14 +420,6 @@ pub(super) enum HTLCFailureMsg { Malformed(msgs::UpdateFailMalformedHTLC), } -/// Stores whether we can't forward an HTLC or relevant forwarding info -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug -pub(super) enum PendingHTLCStatus { - Forward(PendingHTLCInfo), - Fail(HTLCFailureMsg), -} - #[cfg_attr(test, derive(Clone, Debug, PartialEq))] pub(super) struct PendingAddHTLCInfo { pub(super) forward_info: PendingHTLCInfo, @@ -1402,9 +1377,10 @@ enum PostMonitorUpdateChanResume { Unblocked { channel_id: ChannelId, counterparty_node_id: PublicKey, + funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, - htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, @@ -9582,10 +9558,9 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ /// Handles actions which need to complete after a [`ChannelMonitorUpdate`] has been applied /// which can happen after the per-peer state lock has been dropped. fn post_monitor_update_unlock( - &self, channel_id: ChannelId, counterparty_node_id: PublicKey, - unbroadcasted_batch_funding_txid: Option, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, - htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, @@ -9646,9 +9621,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.handle_monitor_update_completion_actions(update_actions); - if let Some(forwards) = htlc_forwards { - self.forward_htlcs(&mut [forwards][..]); - } if let Some(decode) = decode_update_add_htlcs { self.push_decode_update_add_htlcs(decode); } @@ -9660,7 +9632,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } - self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); + self.prune_persisted_inbound_htlc_onions( + channel_id, + counterparty_node_id, + funding_txo, + user_channel_id, + committed_outbound_htlc_sources, + ); } fn handle_monitor_update_completion_actions< @@ -10104,13 +10082,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ None }; - let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + let decode_update_add_htlcs = self.handle_channel_resumption( pending_msg_events, chan, updates.raa, updates.commitment_update, updates.commitment_order, - updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, @@ -10129,9 +10106,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id: chan_id, counterparty_node_id, + funding_txo: chan.funding_outpoint(), + user_channel_id: chan.context.get_user_id(), unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs: updates.finalized_claimed_htlcs, failed_htlcs: updates.failed_htlcs, @@ -10144,7 +10122,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound /// edge, we no longer need to persist the inbound edge's onion and can prune it here. fn prune_persisted_inbound_htlc_onions( - &self, committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + &self, outbound_channel_id: ChannelId, outbound_node_id: PublicKey, + outbound_funding_txo: OutPoint, outbound_user_channel_id: u128, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { let per_peer_state = self.per_peer_state.read().unwrap(); for (source, outbound_amt_msat) in committed_outbound_htlc_sources { @@ -10161,7 +10141,17 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(chan) = peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) { - chan.prune_inbound_htlc_onion(source.htlc_id, source, outbound_amt_msat); + chan.prune_inbound_htlc_onion( + source.htlc_id, + &source, + OutboundHop { + amt_msat: outbound_amt_msat, + channel_id: outbound_channel_id, + node_id: outbound_node_id, + funding_txo: outbound_funding_txo, + user_channel_id: outbound_user_channel_id, + }, + ); } } } @@ -10185,10 +10175,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); - chan.inbound_committed_unresolved_htlcs() - .iter() - .filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { .. })) - .count() + chan.inbound_htlcs_pending_decode().count() } #[cfg(test)] @@ -10220,9 +10207,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, @@ -10231,9 +10219,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.post_monitor_update_unlock( channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, @@ -10249,17 +10238,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, channel: &mut FundedChannel, raa: Option, commitment_update: Option, commitment_order: RAACommitmentOrder, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec, - funding_broadcastable: Option, + pending_update_adds: Vec, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option, tx_signatures: Option, tx_abort: Option, channel_ready_order: ChannelReadyOrder, - ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { + ) -> Option<(u64, Vec)> { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", + log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, - pending_forwards.len(), pending_update_adds.len(), + pending_update_adds.len(), if funding_broadcastable.is_some() { "" } else { "not " }, if channel_ready.is_some() { "sending" } else { "without" }, if announcement_sigs.is_some() { "sending" } else { "without" }, @@ -10270,14 +10258,6 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let counterparty_node_id = channel.context.get_counterparty_node_id(); let outbound_scid_alias = channel.context.outbound_scid_alias(); - let mut htlc_forwards = None; - if !pending_forwards.is_empty() { - htlc_forwards = Some(( - outbound_scid_alias, channel.context.get_counterparty_node_id(), - channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(), - channel.context.get_user_id(), pending_forwards - )); - } let mut decode_update_add_htlcs = None; if !pending_update_adds.is_empty() { decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds)); @@ -10364,7 +10344,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, None => { debug_assert!(false, "Channel resumed without a funding txo, this should never happen!"); - return (htlc_forwards, decode_update_add_htlcs); + return decode_update_add_htlcs; } }; } else { @@ -10382,7 +10362,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ emit_initial_channel_ready_event!(pending_events, channel); } - (htlc_forwards, decode_update_add_htlcs) + decode_update_add_htlcs } #[rustfmt::skip] @@ -12431,12 +12411,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take(); - let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + let decode_update_add_htlcs = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.commitment_order, - Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, + Vec::new(), None, responses.channel_ready, responses.announcement_sigs, responses.tx_signatures, responses.tx_abort, responses.channel_ready_order, ); - debug_assert!(htlc_forwards.is_none()); debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); @@ -16581,19 +16560,20 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features } -const SERIALIZATION_VERSION: u8 = 1; +// Bumped in 0.5 because we stop reading/writing legacy ChannelManager pending HTLC maps and +// instead reconstruct them from `Channel{Monitor}` data as part of removing the requirement to +// regularly persist the `ChannelManager`. +const SERIALIZATION_VERSION: u8 = 2; const MIN_SERIALIZATION_VERSION: u8 = 1; -// We plan to start writing this version in 0.5. -// -// LDK 0.5+ will reconstruct the set of pending HTLCs from `Channel{Monitor}` data that started +// LDK 0.5+ reconstructs the set of pending HTLCs from `Channel{Monitor}` data that started // being written in 0.3, ignoring legacy `ChannelManager` HTLC maps on read and not writing them. -// LDK 0.5+ will automatically fail to read if the pending HTLC set cannot be reconstructed, i.e. +// LDK 0.5+ automatically fails to read if the pending HTLC set cannot be reconstructed, i.e. // if we were last written with pending HTLCs on 0.2- or if the new 0.3+ fields are missing. // // If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and // acts accordingly. 
-const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 5; +const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 2; impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec), @@ -16730,11 +16710,6 @@ impl Readable for HTLCFailureMsg { } } -impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ; - (0, Forward), - (1, Fail), -); - impl_writeable_tlv_based_enum!(BlindedFailure, (0, FromIntroductionNode) => {}, (2, FromBlindedNode) => {}, @@ -17061,24 +17036,6 @@ impl< } } - { - let forward_htlcs = self.forward_htlcs.lock().unwrap(); - (forward_htlcs.len() as u64).write(writer)?; - for (short_channel_id, pending_forwards) in forward_htlcs.iter() { - short_channel_id.write(writer)?; - (pending_forwards.len() as u64).write(writer)?; - for forward in pending_forwards { - forward.write(writer)?; - } - } - } - - let mut decode_update_add_htlcs_opt = None; - let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); - if !decode_update_add_htlcs.is_empty() { - decode_update_add_htlcs_opt = Some(decode_update_add_htlcs); - } - let claimable_payments = self.claimable_payments.lock().unwrap(); let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap(); @@ -17125,8 +17082,6 @@ impl< } } - let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); - // Since some FundingNegotiation variants are not persisted, any splice in such state must // be failed upon reload. However, as the necessary information for the SpliceFailed event // is not persisted, the event itself needs to be persisted even though it hasn't been @@ -17222,11 +17177,6 @@ impl< } } - let mut pending_intercepted_htlcs = None; - if our_pending_intercepts.len() != 0 { - pending_intercepted_htlcs = Some(our_pending_intercepts); - } - let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments); if pending_claiming_payments.as_ref().unwrap().is_empty() { // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments @@ -17249,7 +17199,7 @@ impl< write_tlv_fields!(writer, { (1, pending_outbound_payments_no_retry, required), - (2, pending_intercepted_htlcs, option), + // 2 was previously used for the pending_intercepted_htlcs map. (3, pending_outbound_payments, required), (4, pending_claiming_payments, option), (5, self.our_network_pubkey, required), @@ -17260,7 +17210,7 @@ impl< (10, legacy_in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_opt, option), + // 14 was previously used for the decode_update_add_htlcs map. (15, self.inbound_payment_id_secret, required), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), @@ -17340,12 +17290,6 @@ pub(super) struct ChannelManagerData { in_flight_monitor_updates: HashMap<(PublicKey, ChannelId), Vec>, peer_storage_dir: Vec<(PublicKey, Vec)>, async_receive_offer_cache: AsyncReceiveOfferCache, - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. - forward_htlcs_legacy: HashMap>, - pending_intercepted_htlcs_legacy: HashMap, - decode_update_add_htlcs_legacy: HashMap>, // The `ChannelManager` version that was written. 
version: u8, } @@ -17387,32 +17331,31 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> ( args.entropy_source, args.signer_provider, + args.logger, &provided_channel_type_features(&args.config), ), )?; channels.push(channel); } - let forward_htlcs_legacy: HashMap> = - if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { - let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut fwds = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); - for _ in 0..forward_htlcs_count { - let short_channel_id = Readable::read(reader)?; - let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min( - pending_forwards_count as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..pending_forwards_count { - pending_forwards.push(Readable::read(reader)?); - } - fwds.insert(short_channel_id, pending_forwards); + if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { + let forward_htlcs_count: u64 = Readable::read(reader)?; + // This map is written if version = 1 (LDK versions 0.4-) only. + let mut _forward_htlcs_legacy: HashMap> = + hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + for _ in 0..forward_htlcs_count { + let short_channel_id = Readable::read(reader)?; + let pending_forwards_count: u64 = Readable::read(reader)?; + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..pending_forwards_count { + pending_forwards.push(Readable::read(reader)?); } - fwds - } else { - new_hash_map() - }; + _forward_htlcs_legacy.insert(short_channel_id, pending_forwards); + } + } let claimable_htlcs_count: u64 = Readable::read(reader)?; let mut claimable_htlcs_list = @@ -17498,9 +17441,13 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> }; } - let mut pending_intercepted_htlcs_legacy: Option> = - None; - let mut decode_update_add_htlcs_legacy: Option>> = + // Read for compatibility with 0.4- but no longer used in 0.5+, instead these maps are + // reconstructed during runtime from decode_update_add_htlcs, via the next call to + // process_pending_htlc_forwards. + let mut _pending_intercepted_htlcs_legacy: Option< + HashMap, + > = None; + let mut _decode_update_add_htlcs_legacy: Option>> = None; // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
let mut pending_outbound_payments_no_retry: Option>> = @@ -17528,7 +17475,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs_legacy, option), + (2, _pending_intercepted_htlcs_legacy, option), (3, pending_outbound_payments, option), (4, pending_claiming_payments, option), (5, received_network_pubkey, option), @@ -17539,7 +17486,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_legacy, option), + (14, _decode_update_add_htlcs_legacy, option), (15, inbound_payment_id_secret, option), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), @@ -17672,13 +17619,10 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> best_block_height, best_block_hash, channels, - forward_htlcs_legacy, claimable_payments, peer_init_features, pending_events_read, highest_seen_timestamp, - pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy - .unwrap_or_else(new_hash_map), pending_outbound_payments, pending_claiming_payments: pending_claiming_payments.unwrap_or_else(new_hash_map), received_network_pubkey, @@ -17686,8 +17630,6 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> .unwrap_or_else(Vec::new), fake_scid_rand_bytes, probing_cookie_secret, - decode_update_add_htlcs_legacy: decode_update_add_htlcs_legacy - .unwrap_or_else(new_hash_map), inbound_payment_id_secret, in_flight_monitor_updates: in_flight_monitor_updates.unwrap_or_default(), peer_storage_dir: peer_storage_dir.unwrap_or_default(), @@ -17804,15 +17746,6 @@ pub struct ChannelManagerReadArgs< /// /// This is not exported to bindings users because we have no HashMap bindings pub channel_monitors: HashMap>, - - /// Whether the `ChannelManager` should attempt to reconstruct its set of pending HTLCs from - /// `Channel{Monitor}` data rather than its own persisted maps, which is planned to become - /// the default behavior in upcoming versions. - /// - /// If `None`, whether we reconstruct or use the legacy maps will be decided randomly during - /// `ChannelManager::from_channel_manager_data`. - #[cfg(test)] - pub reconstruct_manager_from_monitors: Option, } impl< @@ -17850,8 +17783,6 @@ impl< channel_monitors: hash_map_from_iter( channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)), ), - #[cfg(test)] - reconstruct_manager_from_monitors: None, } } } @@ -17978,19 +17909,16 @@ impl< best_block_height, best_block_hash, channels, - mut forward_htlcs_legacy, claimable_payments, peer_init_features, mut pending_events_read, highest_seen_timestamp, - mut pending_intercepted_htlcs_legacy, pending_outbound_payments, pending_claiming_payments, received_network_pubkey, monitor_update_blocked_actions_per_peer, mut fake_scid_rand_bytes, mut probing_cookie_secret, - mut decode_update_add_htlcs_legacy, mut inbound_payment_id_secret, mut in_flight_monitor_updates, peer_storage_dir, @@ -18541,40 +18469,6 @@ impl< pending_background_events.push(new_event); } - // In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and - // persist that state, relying on it being up-to-date on restart. 
Newer versions are moving - // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead - // reconstruct HTLC/payment state based on `Channel{Monitor}` data if - // `reconstruct_manager_from_monitors` is set below. Currently we set in tests randomly to - // ensure the legacy codepaths also have test coverage. - #[cfg(not(test))] - let reconstruct_manager_from_monitors = _version >= RECONSTRUCT_HTLCS_FROM_CHANS_VERSION; - #[cfg(test)] - let reconstruct_manager_from_monitors = - args.reconstruct_manager_from_monitors.unwrap_or_else(|| { - use core::hash::{BuildHasher, Hasher}; - - match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { - Ok(val) => match val.as_str() { - "1" => true, - "0" => false, - _ => panic!( - "LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", - val - ), - }, - Err(_) => { - let rand_val = - std::collections::hash_map::RandomState::new().build_hasher().finish(); - if rand_val % 2 == 0 { - true - } else { - false - } - }, - } - }); - // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. We put them here and do so after we // have a fully-constructed `ChannelManager` at the end. @@ -18587,11 +18481,11 @@ impl< // that it is handled. let mut already_forwarded_htlcs: HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, > = new_hash_map(); let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, >, prev_hop: &HTLCPreviousHopData, payment_hash: &PaymentHash| { @@ -18623,37 +18517,25 @@ impl< let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { - if let Some(funded_chan) = chan.as_funded() { - let scid_alias = funded_chan.context.outbound_scid_alias(); - let inbound_committed_update_adds = - funded_chan.inbound_committed_unresolved_htlcs(); - for (payment_hash, htlc) in inbound_committed_update_adds { - match htlc { - InboundUpdateAdd::WithOnion { update_add_htlc } => { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel` as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs - .entry(scid_alias) - .or_insert_with(Vec::new) - .push(update_add_htlc); - }, - InboundUpdateAdd::Forwarded { - hop_data, - outbound_amt_msat, - } => { - already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) - .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); - }, - InboundUpdateAdd::Legacy => { - return Err(DecodeError::InvalidValue) - }, - } - } + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. 
+ let scid_alias = funded_chan.context.outbound_scid_alias(); + for update_add_htlc in funded_chan.inbound_htlcs_pending_decode() { + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + } + for (payment_hash, prev_hop, next_hop) in + funded_chan.inbound_forwarded_htlcs() + { + already_forwarded_htlcs + .entry((prev_hop.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((prev_hop, next_hop)); } } } @@ -18690,29 +18572,28 @@ impl< } } for (channel_id, monitor) in args.channel_monitors.iter() { - let mut is_channel_closed = true; + let (mut is_channel_closed, mut user_channel_id_opt) = (true, None); let counterparty_node_id = monitor.get_counterparty_node_id(); if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; - is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors && !is_channel_closed { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { - if let Some(funded_chan) = chan.as_funded() { - for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() - { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop, - "HTLC already forwarded to the outbound edge", - &args.logger, - ); - prune_forwarded_htlc( - &mut already_forwarded_htlcs, - &prev_hop, - &payment_hash, - ); - } + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + is_channel_closed = false; + user_channel_id_opt = Some(chan.context().get_user_id()); + + if let Some(funded_chan) = chan.as_funded() { + for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop, + "HTLC already forwarded to the outbound edge", + &args.logger, + ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop, + &payment_hash, + ); } } } @@ -18730,65 +18611,20 @@ impl< let htlc_id = SentHTLCId::from_source(&htlc_source); match htlc_source { HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - - // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed - // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from - // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, - // we'll double-forward. - if reconstruct_manager_from_monitors { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC already forwarded to the outbound edge", - &&logger, - ); - prune_forwarded_htlc( - &mut already_forwarded_htlcs, - &prev_hop_data, - &htlc.payment_hash, - ); - } - - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs_legacy`, - // `pending_intercepted_htlcs_legacy`, or - // `decode_update_add_htlcs_legacy`, we were apparently not persisted - // after the monitor was when forwarding the payment. + // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the + // above loop, but we need to prune from those added HTLCs if they were already + // forwarded to the outbound edge. Otherwise, we'll double-forward. 
dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, + &mut decode_update_add_htlcs, &prev_hop_data, - "HTLC was forwarded to the closed channel", + "HTLC already forwarded to the outbound edge", &&logger, ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } - } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false - } else { true } - }); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop_data, + &htlc.payment_hash, + ); }, HTLCSource::OutboundRoute { payment_id, @@ -18997,7 +18833,7 @@ impl< Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id())) + monitor.get_funding_txo(), monitor.channel_id(), user_channel_id_opt)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -19195,55 +19031,41 @@ impl< } } - if reconstruct_manager_from_monitors { - // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. - // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. - for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { - if let HTLCSource::PreviousHopData(prev_hop_data) = src { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was failed backwards during manager read", - &args.logger, - ); - prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); - } + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); + prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); } + } - // See above comment on `failed_htlcs`. - for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } + // See above comment on `failed_htlcs`. 
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); } } - let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = - if reconstruct_manager_from_monitors { - (decode_update_add_htlcs, new_hash_map(), new_hash_map()) - } else { - ( - decode_update_add_htlcs_legacy, - forward_htlcs_legacy, - pending_intercepted_htlcs_legacy, - ) - }; - - // If we have a pending intercept HTLC present but no corresponding event, add that now rather - // than relying on the user having persisted the event prior to shutdown. - for (id, fwd) in pending_intercepted_htlcs.iter() { - if !pending_events_read.iter().any( - |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), - ) { - match create_htlc_intercepted_event(*id, fwd) { - Ok(ev) => pending_events_read.push_back((ev, None)), - Err(()) => debug_assert!(false), - } + // See above comment on `failed_htlcs`. + for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); } } @@ -19299,9 +19121,9 @@ impl< inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs), + pending_intercepted_htlcs: Mutex::new(new_hash_map()), - forward_htlcs: Mutex::new(forward_htlcs), + forward_htlcs: Mutex::new(new_hash_map()), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, @@ -19361,33 +19183,33 @@ impl< if let Some(forwarded_htlcs) = already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) { - for (hop_data, outbound_amt_msat) in forwarded_htlcs { + for (prev_hop, next_hop) in forwarded_htlcs { let new_pending_claim = - !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { - matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.channel_id == hop_data.channel_id) + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { - let counterparty_node_id = monitor.get_counterparty_node_id(); - let is_channel_closed = channel_manager + let is_downstream_closed = channel_manager .per_peer_state .read() .unwrap() - .get(&counterparty_node_id) + .get(&next_hop.node_id) .map_or(true, |peer_state_mtx| { !peer_state_mtx .lock() .unwrap() .channel_by_id - .contains_key(channel_id) + .contains_key(&next_hop.channel_id) }); pending_claims_to_replay.push(( - HTLCSource::PreviousHopData(hop_data), + HTLCSource::PreviousHopData(prev_hop), payment_preimage, - outbound_amt_msat, - is_channel_closed, - counterparty_node_id, - monitor.get_funding_txo(), - *channel_id, + next_hop.amt_msat, + is_downstream_closed, + next_hop.node_id, + next_hop.funding_txo, + next_hop.channel_id, + Some(next_hop.user_channel_id), )); } } @@ -19657,6 +19479,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, + downstream_user_channel_id, ) in pending_claims_to_replay { // We use `downstream_closed` 
@@ -19672,7 +19495,7 @@ impl<
 				downstream_node_id,
 				downstream_funding,
 				downstream_channel_id,
-				None,
+				downstream_user_channel_id,
 				None,
 				None,
 			);
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index a5461154a02..8fbcfcd12e6 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -911,8 +911,6 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
 					tx_broadcaster: &broadcaster,
 					logger: &self.logger,
 					channel_monitors,
-					#[cfg(test)]
-					reconstruct_manager_from_monitors: None,
 				},
 			)
 			.unwrap();
@@ -1311,7 +1309,7 @@ fn check_claimed_htlcs_match_route<'a, 'b, 'c>(
 pub fn _reload_node<'a, 'b, 'c>(
 	node: &'a Node<'a, 'b, 'c>, config: UserConfig, chanman_encoded: &[u8],
-	monitors_encoded: &[&[u8]], _reconstruct_manager_from_monitors: Option<bool>,
+	monitors_encoded: &[&[u8]],
 ) -> TestChannelManager<'b, 'c> {
 	let mut monitors_read = Vec::with_capacity(monitors_encoded.len());
 	for encoded in monitors_encoded {
@@ -1345,8 +1343,6 @@ pub fn _reload_node<'a, 'b, 'c>(
 			tx_broadcaster: node.tx_broadcaster,
 			logger: node.logger,
 			channel_monitors,
-			#[cfg(test)]
-			reconstruct_manager_from_monitors: _reconstruct_manager_from_monitors,
 		},
 	)
 	.unwrap()
@@ -1368,7 +1364,7 @@
 #[macro_export]
 macro_rules! _reload_node_inner {
 	($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister:
-	ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr
+	ident, $new_chain_monitor: ident, $new_channelmanager: ident
 	) => {
 		let chanman_encoded = $chanman_encoded;
@@ -1383,12 +1379,11 @@ macro_rules! _reload_node_inner {
 		);
 		$node.chain_monitor = &$new_chain_monitor;
-		$new_channelmanager = _reload_node(
+		$new_channelmanager = $crate::ln::functional_test_utils::_reload_node(
 			&$node,
 			$new_config,
 			&chanman_encoded,
 			$monitors_encoded,
-			$reconstruct_pending_htlcs,
 		);
 		$node.node = &$new_channelmanager;
 		$node.onion_messenger.set_offers_handler(&$new_channelmanager);
@@ -1401,45 +1396,26 @@ macro_rules! reload_node {
 	// Reload the node using the node's current config
 	($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
 		let config = $node.node.get_current_config();
-		_reload_node_inner!(
+		$crate::_reload_node_inner!(
 			$node,
 			config,
 			$chanman_encoded,
 			$monitors_encoded,
 			$persister,
 			$new_chain_monitor,
-			$new_channelmanager,
-			None
+			$new_channelmanager
 		);
 	};
 	// Reload the node with the new provided config
 	($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
-		_reload_node_inner!(
+		$crate::_reload_node_inner!(
 			$node,
 			$new_config,
 			$chanman_encoded,
 			$monitors_encoded,
 			$persister,
 			$new_chain_monitor,
-			$new_channelmanager,
-			None
-		);
-	};
-	// Reload the node and have the `ChannelManager` use new codepaths that reconstruct its set of
-	// pending HTLCs from `Channel{Monitor}` data.
-	($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister:
-		ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr
-	) => {
-		let config = $node.node.get_current_config();
-		_reload_node_inner!(
-			$node,
-			config,
-			$chanman_encoded,
-			$monitors_encoded,
-			$persister,
-			$new_chain_monitor,
-			$new_channelmanager,
-			$reconstruct_pending_htlcs
+			$new_channelmanager
 		);
 	};
 }
@@ -2672,20 +2648,23 @@ pub fn commitment_signed_dance_through_cp_raa(
 	node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool,
 	includes_claim: bool,
 ) -> Option<MessageSendEvent> {
-	let (extra_msg_option, bs_revoke_and_ack) =
+	let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) =
 		do_main_commitment_signed_dance(node_a, node_b, fail_backwards);
+	assert!(node_b_holding_cell_htlcs.is_empty());
 	node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack);
 	check_added_monitors(node_a, if includes_claim { 0 } else { 1 });
 	extra_msg_option
 }
 
 /// Does the main logic in the commitment_signed dance. After the first `commitment_signed` has
-/// been delivered, this method picks up and delivers the response `revoke_and_ack` and
-/// `commitment_signed`, returning the recipient's `revoke_and_ack` and any extra message it may
-/// have included.
+/// been delivered, delivers the response `revoke_and_ack` and `commitment_signed`, and returns:
+/// - The recipient's `revoke_and_ack`
+/// - The recipient's extra message (if any) after handling the `commitment_signed`
+/// - Any messages released from the initiator's holding cell after handling the `revoke_and_ack`
+///   (e.g., a second HTLC on the same channel)
 pub fn do_main_commitment_signed_dance(
 	node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool,
-) -> (Option<MessageSendEvent>, msgs::RevokeAndACK) {
+) -> (Option<MessageSendEvent>, msgs::RevokeAndACK, Vec<MessageSendEvent>) {
 	let node_a_id = node_a.node.get_our_node_id();
 	let node_b_id = node_b.node.get_our_node_id();
@@ -2693,7 +2672,9 @@ pub fn do_main_commitment_signed_dance(
 	check_added_monitors(&node_b, 0);
 	assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
 	node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack);
-	assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
+	// Handling the RAA may release HTLCs from node_b's holding cell (e.g., if multiple HTLCs
+	// were sent over the same channel and the second was queued behind the first).
+	let node_b_holding_cell_htlcs = node_b.node.get_and_clear_pending_msg_events();
 	check_added_monitors(&node_b, 1);
 	node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed);
 	let (bs_revoke_and_ack, extra_msg_option) = {
@@ -2716,7 +2697,7 @@ pub fn do_main_commitment_signed_dance(
 		assert!(node_a.node.get_and_clear_pending_events().is_empty());
 		assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
 	}
-	(extra_msg_option, bs_revoke_and_ack)
+	(extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs)
 }
 
 /// Runs the commitment_signed dance by delivering the commitment_signed and handling the
@@ -2733,9 +2714,10 @@ pub fn commitment_signed_dance_return_raa(
 		.node
 		.handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed);
 	check_added_monitors(&node_a, 1);
-	let (extra_msg_option, bs_revoke_and_ack) =
+	let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) =
 		do_main_commitment_signed_dance(&node_a, &node_b, fail_backwards);
 	assert!(extra_msg_option.is_none());
+	assert!(node_b_holding_cell_htlcs.is_empty());
 	bs_revoke_and_ack
 }
@@ -2971,7 +2953,7 @@ pub fn check_payment_claimable(
 #[cfg(any(test, ldk_bench, feature = "_test_utils"))]
 macro_rules! expect_payment_claimable {
 	($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
-		expect_payment_claimable!(
+		$crate::expect_payment_claimable!(
 			$node,
 			$expected_payment_hash,
 			$expected_payment_secret,
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index c7e7175602d..572f5120366 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -439,7 +439,6 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 		tx_broadcaster: nodes[0].tx_broadcaster,
 		logger: &logger,
 		channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(),
-		reconstruct_manager_from_monitors: None,
 	}) { } else {
 		panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
 	};
@@ -458,7 +457,6 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 		tx_broadcaster: nodes[0].tx_broadcaster,
 		logger: &logger,
 		channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(),
-		reconstruct_manager_from_monitors: None,
 	}).unwrap();
 	nodes_0_deserialized = nodes_0_deserialized_tmp;
 	assert!(nodes_0_read.is_empty());
@@ -1953,19 +1951,12 @@ fn test_reload_node_with_preimage_in_monitor_claims_htlc() {
 		&[&mon_0_1_serialized, &mon_1_2_serialized],
 		persister,
 		new_chain_monitor,
-		nodes_1_deserialized,
-		Some(true)
+		nodes_1_deserialized
 	);
 
 	// When the claim is reconstructed during reload, a PaymentForwarded event is generated.
-	// This event has next_user_channel_id as None since the outbound HTLC was already removed.
 	// Fetching events triggers the pending monitor update (adding preimage) to be applied.
-	let events = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 1);
-	match &events[0] {
-		Event::PaymentForwarded { total_fee_earned_msat: Some(1000), .. } => {},
-		_ => panic!("Expected PaymentForwarded event"),
-	}
+	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
 	check_added_monitors(&nodes[1], 1);
 
 	// Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell.
@@ -2062,8 +2053,7 @@ fn test_reload_node_without_preimage_fails_htlc() {
 		&[&mon_0_1_serialized, &mon_1_2_serialized],
 		persister,
 		new_chain_monitor,
-		nodes_1_deserialized,
-		Some(true)
+		nodes_1_deserialized
 	);
 
 	// After reload, nodes[1] should have generated an HTLCHandlingFailed event.
@@ -2088,3 +2078,147 @@ fn test_reload_node_without_preimage_fails_htlc() {
 	// nodes[0] should now have received the failure and generate PaymentFailed.
 	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());
 }
+
+#[test]
+fn test_reload_with_mpp_claims_on_same_channel() {
+	// Test that if a forwarding node has two HTLCs for the same MPP payment that were both
+	// irrevocably removed on the outbound edge via claim but are still forwarded-and-unresolved
+	// on the inbound edge, both HTLCs will be claimed backwards on restart.
+	//
+	// Topology:
+	// nodes[0] ----chan_0_1----> nodes[1] ----chan_1_2_a----> nodes[2]
+	//                                    \----chan_1_2_b---/
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let persister;
+	let new_chain_monitor;
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes_1_deserialized;
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_0_id = nodes[0].node.get_our_node_id();
+	let node_1_id = nodes[1].node.get_our_node_id();
+	let node_2_id = nodes[2].node.get_our_node_id();
+
+	let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 2_000_000, 0);
+	let chan_1_2_a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+	let chan_1_2_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+
+	let chan_id_0_1 = chan_0_1.2;
+	let chan_id_1_2_a = chan_1_2_a.2;
+	let chan_id_1_2_b = chan_1_2_b.2;
+
+	// Send an MPP payment large enough that the router must split it across both outbound channels.
+	// Each 1M sat outbound channel has 100M msat max in-flight, so 150M msat requires splitting.
+	let amt_msat = 150_000_000;
+	let (route, payment_hash, payment_preimage, payment_secret) =
+		get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
+
+	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+	nodes[0].node.send_payment_with_route(
+		route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id,
+	).unwrap();
+	check_added_monitors(&nodes[0], 1);
+
+	// Forward the first HTLC nodes[0] -> nodes[1] -> nodes[2]. Note that the second HTLC is released
+	// from the holding cell during the first HTLC's commitment_signed_dance.
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event_1 = SendEvent::from_event(events.remove(0));
+
+	nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event_1.msgs[0]);
+	check_added_monitors(&nodes[1], 0);
+	nodes[1].node.handle_commitment_signed_batch_test(node_0_id, &payment_event_1.commitment_msg);
+	check_added_monitors(&nodes[1], 1);
+	let (_, raa, holding_cell_htlcs) =
+		do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
+	assert_eq!(holding_cell_htlcs.len(), 1);
+	let payment_event_2 = holding_cell_htlcs.into_iter().next().unwrap();
+	nodes[1].node.handle_revoke_and_ack(node_0_id, &raa);
+	check_added_monitors(&nodes[1], 1);
+
+	nodes[1].node.process_pending_htlc_forwards();
+	check_added_monitors(&nodes[1], 1);
+	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let ev_1_2 = events.remove(0);
+	pass_along_path(
+		&nodes[1], &[&nodes[2]], amt_msat, payment_hash, Some(payment_secret), ev_1_2, false, None,
+	);
+
+	// Second HTLC: full path nodes[0] -> nodes[1] -> nodes[2]. PaymentClaimable expected at end.
+	pass_along_path(
+		&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret),
+		payment_event_2, true, None,
+	);
+
+	// Claim the HTLCs such that they're fully removed from the outbound edge, but disconnect
+	// node_0<>node_1 so that they can't be claimed backwards by node_1.
+	nodes[2].node.claim_funds(payment_preimage);
+	check_added_monitors(&nodes[2], 2);
+	expect_payment_claimed!(nodes[2], payment_hash, amt_msat);
+
+	nodes[0].node.peer_disconnected(node_1_id);
+	nodes[1].node.peer_disconnected(node_0_id);
+
+	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 2);
+	for ev in events {
+		match ev {
+			MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. } => {
+				assert_eq!(*node_id, node_1_id);
+				assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+				nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates.update_fulfill_htlcs[0].clone());
+				check_added_monitors(&nodes[1], 1);
+				do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+
+	let events = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	for event in events {
+		expect_payment_forwarded(
+			event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false,
+		);
+	}
+
+	// Clear the holding cell's claim entries on chan_0_1 before serialization.
+	// This simulates a crash where both HTLCs were fully removed on the outbound edges but are
+	// still present on the inbound edge without a resolution.
+	nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1);
+
+	let node_1_serialized = nodes[1].node.encode();
+	let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode();
+	let mon_1_2_a_serialized = get_monitor!(nodes[1], chan_id_1_2_a).encode();
+	let mon_1_2_b_serialized = get_monitor!(nodes[1], chan_id_1_2_b).encode();
+
+	reload_node!(
+		nodes[1],
+		node_1_serialized,
+		&[&mon_0_1_serialized, &mon_1_2_a_serialized, &mon_1_2_b_serialized],
+		persister,
+		new_chain_monitor,
+		nodes_1_deserialized
+	);
+
+	// When the claims are reconstructed during reload, PaymentForwarded events are regenerated.
+	let events = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	for event in events {
+		expect_payment_forwarded(
+			event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false,
+		);
+	}
+	// Fetching events triggers the pending monitor updates (one for each HTLC preimage) to be applied.
+	check_added_monitors(&nodes[1], 2);
+
+	// Reconnect nodes[1] to nodes[0]. Both claims should be in nodes[1]'s holding cell.
+	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]);
+	reconnect_args.pending_cell_htlc_claims = (0, 2);
+	reconnect_nodes(reconnect_args);
+
+	// nodes[0] should now have received both fulfills and generate PaymentSent.
+	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
diff --git a/pending_changelog/upgrade-0.2-pending-htlc.txt b/pending_changelog/upgrade-0.2-pending-htlc.txt
new file mode 100644
index 00000000000..6ca11755ee9
--- /dev/null
+++ b/pending_changelog/upgrade-0.2-pending-htlc.txt
@@ -0,0 +1,9 @@
+Backwards Compatibility
+=======================
+
+ * Upgrading directly from 0.2 to 0.5 with pending HTLC forwards is not supported.
+   If you have pending HTLC forwards when upgrading from 0.2, you must first upgrade
+   to 0.3, forward the HTLCs (completing at least the outbound commitment), then
+   upgrade to 0.5. Attempting to upgrade directly from 0.2 to 0.5 with pending HTLC
+   forwards will result in a `DecodeError::InvalidValue` when reading the
+   `ChannelManager`.
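
A minimal illustrative sketch (not part of the patch above): one way a node operator's reload path might surface the failure mode the changelog entry describes. Only `lightning::ln::msgs::DecodeError` is a real LDK type here; `report_reload_result` and its wiring are hypothetical, and the actual `ChannelManager` read call is elided.

	// Hypothetical helper (not from the patch): inspect the result of reading a
	// `ChannelManager` after an upgrade. `DecodeError` is the real LDK error type;
	// everything else in this sketch is illustrative.
	use lightning::ln::msgs::DecodeError;

	fn report_reload_result<T>(res: Result<T, DecodeError>) -> Result<T, DecodeError> {
		match res {
			// Manager deserialized cleanly; resume normal operation.
			Ok(manager) => Ok(manager),
			Err(DecodeError::InvalidValue) => {
				// The behaviour described above: a direct 0.2 -> 0.5 upgrade with pending
				// HTLC forwards fails to deserialize. Go back to 0.3, let the pending HTLCs
				// forward (at least through the outbound commitment), then retry the upgrade.
				eprintln!("pending 0.2-era HTLC forwards detected; upgrade via 0.3 first");
				Err(DecodeError::InvalidValue)
			},
			// Any other decode error is unrelated to this upgrade path.
			Err(e) => Err(e),
		}
	}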