diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 7e6ee7f2c35..a11742507f3 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1176,6 +1176,18 @@ pub enum UpdateFulfillCommitFetch { DuplicateClaim {}, } +/// Error returned when processing an invalid interactive-tx message from our counterparty. +pub(super) struct InteractiveTxMsgError { + /// The underlying error. + pub(super) err: ChannelError, + /// If a splice was in progress when processing the message, this contains the splice funding + /// information for emitting a `SpliceFailed` event. + pub(super) splice_funding_failed: Option, + /// Whether we were quiescent when we received the message, and are no longer due to aborting + /// the session. + pub(super) exited_quiescence: bool, +} + /// The return value of `monitor_updating_restored` pub(super) struct MonitorRestoreUpdates { pub raa: Option, @@ -1818,104 +1830,118 @@ where fn fail_interactive_tx_negotiation( &mut self, reason: AbortReason, logger: &L, - ) -> (ChannelError, Option) { + ) -> InteractiveTxMsgError { let logger = WithChannelContext::from(logger, &self.context(), None); log_info!(logger, "Failed interactive transaction negotiation: {reason}"); - let splice_funding_failed = match &mut self.phase { + let (splice_funding_failed, exited_quiescence) = match &mut self.phase { ChannelPhase::Undefined => unreachable!(), - ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => None, + ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => { + (None, false) + }, ChannelPhase::UnfundedV2(pending_v2_channel) => { pending_v2_channel.interactive_tx_constructor.take(); - None + (None, false) }, ChannelPhase::Funded(funded_channel) => { if funded_channel.should_reset_pending_splice_state(false) { - funded_channel.reset_pending_splice_state() + (funded_channel.reset_pending_splice_state(), true) } else { debug_assert!(false, "We should never fail an interactive funding negotiation once we're exchanging tx_signatures"); - None + (None, false) } }, }; - (ChannelError::Abort(reason), splice_funding_failed) + InteractiveTxMsgError { + err: ChannelError::Abort(reason), + splice_funding_failed, + exited_quiescence, + } } pub fn tx_add_input( &mut self, msg: &msgs::TxAddInput, logger: &L, - ) -> Result)> { + ) -> Result { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_add_input(msg) .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger)), - None => Err(( - ChannelError::WarnAndDisconnect( + None => Err(InteractiveTxMsgError { + err: ChannelError::WarnAndDisconnect( "Received unexpected interactive transaction negotiation message".to_owned(), ), - None, - )), + splice_funding_failed: None, + exited_quiescence: false, + }), } } pub fn tx_add_output( &mut self, msg: &msgs::TxAddOutput, logger: &L, - ) -> Result)> { + ) -> Result { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_add_output(msg) .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger)), - None => Err(( - ChannelError::WarnAndDisconnect( + None => Err(InteractiveTxMsgError { + err: ChannelError::WarnAndDisconnect( "Received unexpected interactive transaction negotiation message".to_owned(), ), - None, - )), + splice_funding_failed: None, + exited_quiescence: false, + }), } } pub fn tx_remove_input( &mut self, msg: &msgs::TxRemoveInput, logger: &L, - ) -> Result)> { 
+ ) -> Result { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_remove_input(msg) .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger)), - None => Err(( - ChannelError::WarnAndDisconnect( + None => Err(InteractiveTxMsgError { + err: ChannelError::WarnAndDisconnect( "Received unexpected interactive transaction negotiation message".to_owned(), ), - None, - )), + splice_funding_failed: None, + exited_quiescence: false, + }), } } pub fn tx_remove_output( &mut self, msg: &msgs::TxRemoveOutput, logger: &L, - ) -> Result)> { + ) -> Result { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_remove_output(msg) .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger)), - None => Err(( - ChannelError::WarnAndDisconnect( + None => Err(InteractiveTxMsgError { + err: ChannelError::WarnAndDisconnect( "Received unexpected interactive transaction negotiation message".to_owned(), ), - None, - )), + splice_funding_failed: None, + exited_quiescence: false, + }), } } pub fn tx_complete( &mut self, msg: &msgs::TxComplete, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result)> { + ) -> Result { let tx_complete_action = match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_complete(msg) .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger))?, None => { let err = "Received unexpected interactive transaction negotiation message"; - return Err((ChannelError::WarnAndDisconnect(err.to_owned()), None)); + return Err(InteractiveTxMsgError { + err: ChannelError::WarnAndDisconnect(err.to_owned()), + splice_funding_failed: None, + exited_quiescence: false, + }); }, }; @@ -1975,13 +2001,13 @@ where pub fn tx_abort( &mut self, msg: &msgs::TxAbort, logger: &L, - ) -> Result<(Option, Option), ChannelError> { + ) -> Result<(Option, Option, bool), ChannelError> { // If we have not sent a `tx_abort` message for this negotiation previously, we need to echo // back a tx_abort message according to the spec: // https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L560-L561 // For rationale why we echo back `tx_abort`: // https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L578-L580 - let (should_ack, splice_funding_failed) = match &mut self.phase { + let (should_ack, splice_funding_failed, exited_quiescence) = match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => { let err = "Got an unexpected tx_abort message: This is an unfunded channel created with V1 channel establishment"; @@ -1990,7 +2016,7 @@ where ChannelPhase::UnfundedV2(pending_v2_channel) => { let had_constructor = pending_v2_channel.interactive_tx_constructor.take().is_some(); - (had_constructor, None) + (had_constructor, None, false) }, ChannelPhase::Funded(funded_channel) => { if funded_channel.has_pending_splice_awaiting_signatures() @@ -2018,11 +2044,11 @@ where .unwrap_or(false); debug_assert!(has_funding_negotiation); let splice_funding_failed = funded_channel.reset_pending_splice_state(); - (true, splice_funding_failed) + (true, splice_funding_failed, true) } else { // We were not tracking the pending funding negotiation state anymore, likely // due to a disconnection or already having sent our own `tx_abort`. 
- (false, None) + (false, None, false) } }, }; @@ -2038,7 +2064,7 @@ where } }); - Ok((tx_abort, splice_funding_failed)) + Ok((tx_abort, splice_funding_failed, exited_quiescence)) } #[rustfmt::skip] diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a21456f0fd5..e39b3aedd12 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,8 +59,8 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel, - PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, + FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, InteractiveTxMsgError, + OutboundV1Channel, PendingV2Channel, ReconnectionMsg, ShutdownResult, StfuResponse, UpdateFulfillCommitFetch, WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; @@ -938,6 +938,7 @@ struct MsgHandleErrInternal { closes_channel: bool, shutdown_finish: Option<(ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>)>, tx_abort: Option, + exited_quiescence: bool, } impl MsgHandleErrInternal { @@ -952,10 +953,12 @@ impl MsgHandleErrInternal { closes_channel: false, shutdown_finish: None, tx_abort: None, + exited_quiescence: false, } } - fn no_such_peer(counterparty_node_id: &PublicKey, channel_id: ChannelId) -> Self { + fn unreachable_no_such_peer(counterparty_node_id: &PublicKey, channel_id: ChannelId) -> Self { + debug_assert!(false); let err = format!("No such peer for the passed counterparty_node_id {counterparty_node_id}"); Self::send_err_msg_no_close(err, channel_id) @@ -970,7 +973,13 @@ impl MsgHandleErrInternal { } fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, closes_channel: false, shutdown_finish: None, tx_abort: None } + Self { + err, + closes_channel: false, + shutdown_finish: None, + tx_abort: None, + exited_quiescence: false, + } } fn from_finish_shutdown( @@ -991,6 +1000,7 @@ impl MsgHandleErrInternal { closes_channel: true, shutdown_finish: Some((shutdown_res, channel_update)), tx_abort: None, + exited_quiescence: false, } } @@ -1026,7 +1036,13 @@ impl MsgHandleErrInternal { }, }, }; - Self { err, closes_channel: false, shutdown_finish: None, tx_abort } + Self { + err, + closes_channel: false, + shutdown_finish: None, + tx_abort, + exited_quiescence: false, + } } fn dont_send_error_message(&mut self) { @@ -1042,6 +1058,11 @@ impl MsgHandleErrInternal { fn closes_channel(&self) -> bool { self.closes_channel } + + fn with_exited_quiescence(mut self, exited_quiescence: bool) -> Self { + self.exited_quiescence = exited_quiescence; + self + } } /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should @@ -4348,15 +4369,26 @@ impl< }); } - if let Some(msg_event) = msg_event { + let mut holding_cell_res = None; + if msg_event.is_some() || err_internal.exited_quiescence { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); - if peer_state.is_connected { - peer_state.pending_msg_events.push(msg_event); + if let Some(msg_event) = msg_event { + if peer_state.is_connected { + peer_state.pending_msg_events.push(msg_event); + } } + // We need to enqueue the `tx_abort` in 
`pending_msg_events` above before we + // enqueue any commitment updates generated by freeing holding cell HTLCs. + holding_cell_res = err_internal + .exited_quiescence + .then(|| self.check_free_peer_holding_cells(&mut peer_state)); } } + if let Some(res) = holding_cell_res { + self.handle_holding_cell_free_result(res); + } // Return error in case higher-API need one err_internal.err @@ -10793,8 +10825,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer( + MsgHandleErrInternal::unreachable_no_such_peer( counterparty_node_id, common_fields.temporary_channel_id, ) @@ -10864,11 +10895,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // likely to be lost on restart! let (value, output_script, user_id) = { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.common_fields.temporary_channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer( + counterparty_node_id, + msg.common_fields.temporary_channel_id, + ) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) { @@ -10909,8 +10941,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.temporary_channel_id) + MsgHandleErrInternal::unreachable_no_such_peer( + counterparty_node_id, + msg.temporary_channel_id, + ) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -11104,11 +11138,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(&counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(&counterparty_node_id, ChannelId([0; 32])) - })?; + let peer_state_mutex = per_peer_state.get(&counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(&counterparty_node_id, ChannelId([0; 32])) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11142,11 +11174,9 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { let best_block = *self.best_block.read().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11192,17 +11222,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } fn internal_tx_msg< - HandleTxMsgFn: Fn( - &mut Channel, - ) -> Result)>, + HandleTxMsgFn: Fn(&mut Channel) -> Result, >( &self, counterparty_node_id: &PublicKey, channel_id: ChannelId, tx_msg_handler: HandleTxMsgFn, - ) -> Result { + ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11213,9 +11240,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(msg_send) => { let msg_send_event = msg_send.into_msg_send_event(*counterparty_node_id); peer_state.pending_msg_events.push(msg_send_event); - Ok(NotifyOption::SkipPersistHandleEvents) + Ok(()) }, - Err((error, splice_funding_failed)) => { + Err(InteractiveTxMsgError { + err, + splice_funding_failed, + exited_quiescence, + }) => { if let Some(splice_funding_failed) = splice_funding_failed { let pending_events = &mut self.pending_events.lock().unwrap(); pending_events.push_back(( @@ -11231,7 +11262,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ None, )); } - Err(MsgHandleErrInternal::from_chan_no_close(error, channel_id)) + debug_assert!(!exited_quiescence || matches!(err, ChannelError::Abort(_))); + + Err(MsgHandleErrInternal::from_chan_no_close(err, channel_id) + .with_exited_quiescence(exited_quiescence)) }, } }, @@ -11244,7 +11278,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_tx_add_input( &self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput, - ) -> Result { + ) -> Result<(), MsgHandleErrInternal> { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel| { channel.tx_add_input(msg, &self.logger) }) @@ -11252,7 +11286,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_tx_add_output( &self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput, - ) -> Result { + ) -> Result<(), MsgHandleErrInternal> { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel| { channel.tx_add_output(msg, &self.logger) }) @@ -11260,7 +11294,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fn internal_tx_remove_input( &self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput, - ) -> Result { + ) -> Result<(), MsgHandleErrInternal> { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel| { channel.tx_remove_input(msg, &self.logger) }) @@ -11268,7 +11302,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_tx_remove_output( &self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput, - ) -> Result { + ) -> Result<(), MsgHandleErrInternal> { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel| { channel.tx_remove_output(msg, &self.logger) }) @@ -11279,8 +11313,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(&counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(&counterparty_node_id, msg.channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(&counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11361,7 +11394,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(persist) }, - Err((error, splice_funding_failed)) => { + Err(InteractiveTxMsgError { + err, + splice_funding_failed, + exited_quiescence, + }) => { if let Some(splice_funding_failed) = splice_funding_failed { let pending_events = &mut self.pending_events.lock().unwrap(); pending_events.push_back(( @@ -11377,7 +11414,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ None, )); } - Err(MsgHandleErrInternal::from_chan_no_close(error, msg.channel_id)) + debug_assert!(!exited_quiescence || matches!(err, ChannelError::Abort(_))); + + Err(MsgHandleErrInternal::from_chan_no_close(err, msg.channel_id) + .with_exited_quiescence(exited_quiescence)) }, } }, @@ -11391,144 +11431,171 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_tx_signatures( &self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures, ) -> Result<(), MsgHandleErrInternal> { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan_entry) => { - match chan_entry.get_mut().as_funded_mut() { - Some(chan) => { - let best_block_height = self.best_block.read().unwrap().height; - let FundingTxSigned { - commitment_signed, - counterparty_initial_commitment_signed_result, - tx_signatures, - funding_tx, - splice_negotiated, - splice_locked, - } = try_channel_entry!( - self, - peer_state, - chan.tx_signatures(msg, best_block_height, &self.logger), - chan_entry - ); - - // We should never be sending a `commitment_signed` in response to their - // `tx_signatures`. 
- debug_assert!(commitment_signed.is_none()); - debug_assert!(counterparty_initial_commitment_signed_result.is_none()); - - if let Some(tx_signatures) = tx_signatures { - peer_state.pending_msg_events.push( - MessageSendEvent::SendTxSignatures { - node_id: *counterparty_node_id, - msg: tx_signatures, - }, - ); - } - if let Some(splice_locked) = splice_locked { - peer_state.pending_msg_events.push( - MessageSendEvent::SendSpliceLocked { - node_id: *counterparty_node_id, - msg: splice_locked, - }, - ); - } - if let Some((ref funding_tx, ref tx_type)) = funding_tx { - self.broadcast_interactive_funding( - chan, + let (result, holding_cell_res) = { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan_entry) => { + match chan_entry.get_mut().as_funded_mut() { + Some(chan) => { + let best_block_height = self.best_block.read().unwrap().height; + let FundingTxSigned { + commitment_signed, + counterparty_initial_commitment_signed_result, + tx_signatures, funding_tx, - Some(tx_type.clone()), - &self.logger, + splice_negotiated, + splice_locked, + } = try_channel_entry!( + self, + peer_state, + chan.tx_signatures(msg, best_block_height, &self.logger), + chan_entry ); - } - if let Some(splice_negotiated) = splice_negotiated { - self.pending_events.lock().unwrap().push_back(( - events::Event::SplicePending { - channel_id: msg.channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: chan.context.get_user_id(), - new_funding_txo: splice_negotiated.funding_txo, - channel_type: splice_negotiated.channel_type, - new_funding_redeem_script: splice_negotiated - .funding_redeem_script, - }, - None, - )); - } - }, - None => { - let msg = "Got an unexpected tx_signatures message"; - let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; - let err = ChannelError::Close((msg.to_owned(), reason)); - try_channel_entry!(self, peer_state, Err(err), chan_entry) - }, - } - Ok(()) - }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( - counterparty_node_id, - msg.channel_id, - )), - } + + // We should never be sending a `commitment_signed` in response to their + // `tx_signatures`. + debug_assert!(commitment_signed.is_none()); + debug_assert!(counterparty_initial_commitment_signed_result.is_none()); + + if let Some(tx_signatures) = tx_signatures { + peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: *counterparty_node_id, + msg: tx_signatures, + }, + ); + } + if let Some(splice_locked) = splice_locked { + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); + } + if let Some((ref funding_tx, ref tx_type)) = funding_tx { + self.broadcast_interactive_funding( + chan, + funding_tx, + Some(tx_type.clone()), + &self.logger, + ); + } + // We consider a splice negotiated when we exchange `tx_signatures`, + // which also terminates quiescence. 
+ let exited_quiescence = splice_negotiated.is_some(); + if let Some(splice_negotiated) = splice_negotiated { + self.pending_events.lock().unwrap().push_back(( + events::Event::SplicePending { + channel_id: msg.channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan.context.get_user_id(), + new_funding_txo: splice_negotiated.funding_txo, + channel_type: splice_negotiated.channel_type, + new_funding_redeem_script: splice_negotiated + .funding_redeem_script, + }, + None, + )); + } + let holding_cell_res = if exited_quiescence { + self.check_free_peer_holding_cells(peer_state) + } else { + Vec::new() + }; + (Ok(()), holding_cell_res) + }, + None => { + let msg = "Got an unexpected tx_signatures message"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + let err = ChannelError::Close((msg.to_owned(), reason)); + try_channel_entry!(self, peer_state, Err(err), chan_entry) + }, + } + }, + hash_map::Entry::Vacant(_) => ( + Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), + Vec::new(), + ), + } + }; + + self.handle_holding_cell_free_result(holding_cell_res); + result } fn internal_tx_abort( &self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort, ) -> Result { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan_entry) => { - let res = chan_entry.get_mut().tx_abort(msg, &self.logger); - let (tx_abort, splice_failed) = - try_channel_entry!(self, peer_state, res, chan_entry); + let (result, holding_cell_res) = { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan_entry) => { + let res = chan_entry.get_mut().tx_abort(msg, &self.logger); + let (tx_abort, splice_failed, exited_quiescence) = + try_channel_entry!(self, peer_state, res, chan_entry); - let persist = if tx_abort.is_some() || splice_failed.is_some() { - NotifyOption::DoPersist - } else { - NotifyOption::SkipPersistNoEvents - }; + let persist = if tx_abort.is_some() || splice_failed.is_some() { + NotifyOption::DoPersist + } else { + NotifyOption::SkipPersistNoEvents + }; - if let Some(tx_abort_msg) = tx_abort { - peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort { - node_id: *counterparty_node_id, - msg: tx_abort_msg, - }); - } + if let Some(tx_abort_msg) = tx_abort { + peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort { + node_id: *counterparty_node_id, + msg: tx_abort_msg, + }); + } - if let Some(splice_funding_failed) = splice_failed { - let pending_events = &mut self.pending_events.lock().unwrap(); - pending_events.push_back(( - events::Event::SpliceFailed { - channel_id: msg.channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: chan_entry.get().context().get_user_id(), - abandoned_funding_txo: 
splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type, - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, - None, - )); - } + if let Some(splice_funding_failed) = splice_failed { + let pending_events = &mut self.pending_events.lock().unwrap(); + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: msg.channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan_entry.get().context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); + } - Ok(persist) - }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( - counterparty_node_id, - msg.channel_id, - )), - } + let holding_cell_res = if exited_quiescence { + self.check_free_peer_holding_cells(peer_state) + } else { + Vec::new() + }; + (Ok(persist), holding_cell_res) + }, + hash_map::Entry::Vacant(_) => ( + Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), + Vec::new(), + ), + } + }; + + self.handle_holding_cell_free_result(holding_cell_res); + result } #[rustfmt::skip] @@ -11536,11 +11603,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error // closing a channel), so any changes are likely to be lost on restart! let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -11602,8 +11667,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11706,8 +11770,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) })?; let logger; let tx_err: Option<(_, Result)> = { @@ -11809,11 +11872,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // closing a channel), so any changes are likely to be lost on restart! 
let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -11838,8 +11899,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (htlc_source, forwarded_htlc_value, skimmed_fee_msat, send_timestamp) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11920,11 +11980,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error // closing a channel), so any changes are likely to be lost on restart! let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -11946,11 +12004,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error // closing a channel), so any changes are likely to be lost on restart! let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -11977,8 +12033,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let best_block = *self.best_block.read().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11987,18 +12042,15 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let chan = chan_entry.get_mut(); let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let funding_txo = chan.funding().get_funding_txo(); - let (monitor_opt, monitor_update_opt) = try_channel_entry!( - self, - peer_state, - chan.commitment_signed( - msg, - best_block, - &self.signer_provider, - &self.fee_estimator, - &&logger - ), - chan_entry + let res = chan.commitment_signed( + msg, + best_block, + &self.signer_provider, + &self.fee_estimator, + &&logger, ); + let (monitor_opt, monitor_update_opt) = + try_channel_entry!(self, peer_state, res, chan_entry); if let Some(chan) = chan.as_funded_mut() { if let Some(monitor) = monitor_opt { @@ -12054,11 +12106,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_commitment_signed_batch(&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, batch: Vec) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel_id) { @@ -12196,11 +12246,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> { let (htlcs_to_fail, static_invoices) = { let per_peer_state = self.per_peer_state.read().unwrap(); - let mut peer_state_lock = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - }).map(|mtx| mtx.lock().unwrap())?; + let mut peer_state_lock = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + }).map(|mtx| mtx.lock().unwrap())?; let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { @@ -12249,11 +12297,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -12275,9 +12321,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fn internal_stfu(&self, counterparty_node_id: &PublicKey, msg: &msgs::Stfu) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id - ) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12330,11 +12374,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -12429,12 +12471,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (inferred_splice_locked, need_lnd_workaround, holding_cell_res) = { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id - ) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) + })?; let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None); let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12546,8 +12585,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12604,8 +12642,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12650,8 +12687,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + MsgHandleErrInternal::unreachable_no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -15923,48 +15959,36 @@ impl< fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_add_input(counterparty_node_id, msg); - let persist = match &res { - Err(_) => NotifyOption::DoPersist, - Ok(persist) => *persist, - }; + debug_assert!(res.as_ref().err().map_or(true, |err| !err.closes_channel())); let _ = self.handle_error(res, counterparty_node_id); - persist + NotifyOption::SkipPersistHandleEvents }); } fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_add_output(counterparty_node_id, msg); - let persist = match &res { - Err(_) => NotifyOption::DoPersist, - Ok(persist) => *persist, - }; + debug_assert!(res.as_ref().err().map_or(true, |err| !err.closes_channel())); let _ = self.handle_error(res, counterparty_node_id); - persist + NotifyOption::SkipPersistHandleEvents }); } fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_remove_input(counterparty_node_id, msg); - let persist = match &res { - Err(_) => NotifyOption::DoPersist, - Ok(persist) => *persist, - }; + debug_assert!(res.as_ref().err().map_or(true, |err| !err.closes_channel())); let _ = self.handle_error(res, counterparty_node_id); - persist + NotifyOption::SkipPersistHandleEvents }); } fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_remove_output(counterparty_node_id, msg); - let persist = match &res { - Err(_) => NotifyOption::DoPersist, - Ok(persist) => *persist, - }; + debug_assert!(res.as_ref().err().map_or(true, |err| !err.closes_channel())); let _ = self.handle_error(res, counterparty_node_id); - persist + NotifyOption::SkipPersistHandleEvents }); } @@ -15972,7 +15996,10 @@ impl< let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_complete(counterparty_node_id, msg); let persist = match &res { - Err(_) => NotifyOption::DoPersist, + Err(err) => { + debug_assert!(!err.closes_channel()); + NotifyOption::SkipPersistHandleEvents + }, Ok(persist) => *persist, }; let _ = self.handle_error(res, counterparty_node_id); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 4846f7137cc..ecc868b202c 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -1895,6 +1895,13 @@ fn fail_splice_on_interactive_tx_error() { // of sending tx_complete. The failure occurs because the serial id will have the wrong parity. 
let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); + // Queue an outgoing HTLC to the holding cell. It should be freed once we exit quiescence. + let (route, payment_hash, _payment_preimage, payment_secret) = + get_route_and_payment_hash!(initiator, acceptor, 1_000_000); + let onion = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + initiator.node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + let tx_add_input = get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); @@ -1912,11 +1919,28 @@ fn fail_splice_on_interactive_tx_error() { _ => panic!("Expected Event::SpliceFailed"), } - let tx_abort = get_event_msg!(initiator, MessageSendEvent::SendTxAbort, node_id_acceptor); - acceptor.node.handle_tx_abort(node_id_initiator, &tx_abort); + // We exit quiescence upon sending `tx_abort`, so we should see the holding cell be immediately + // freed. + let msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + let tx_abort = if let MessageSendEvent::SendTxAbort { msg, .. } = &msg_events[0] { + msg + } else { + panic!("Unexpected event {:?}", msg_events[0]); + }; + let update = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &msg_events[1] { + updates + } else { + panic!("Unexpected event {:?}", msg_events[1]); + }; + check_added_monitors(initiator, 1); + acceptor.node.handle_tx_abort(node_id_initiator, tx_abort); let tx_abort = get_event_msg!(acceptor, MessageSendEvent::SendTxAbort, node_id_initiator); initiator.node.handle_tx_abort(node_id_acceptor, &tx_abort); + + acceptor.node.handle_update_add_htlc(node_id_initiator, &update.update_add_htlcs[0]); + do_commitment_signed_dance(acceptor, initiator, &update.commitment_signed, false, false); } #[test] @@ -1948,6 +1972,13 @@ fn fail_splice_on_tx_abort() { // tx_complete. let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); + // Queue an outgoing HTLC to the holding cell. It should be freed once we exit quiescence. + let (route, payment_hash, _payment_preimage, payment_secret) = + get_route_and_payment_hash!(initiator, acceptor, 1_000_000); + let onion = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + initiator.node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + let tx_add_input = get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); @@ -1968,8 +1999,189 @@ fn fail_splice_on_tx_abort() { _ => panic!("Expected Event::SpliceFailed"), } + // We exit quiescence upon receiving `tx_abort`, so we should see our `tx_abort` echo and the + // holding cell be immediately freed. + let msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + check_added_monitors(initiator, 1); + if let MessageSendEvent::SendTxAbort { msg, .. } = &msg_events[0] { + acceptor.node.handle_tx_abort(node_id_initiator, msg); + } else { + panic!("Unexpected event {:?}", msg_events[0]); + }; + if let MessageSendEvent::UpdateHTLCs { updates, .. 
} = &msg_events[1] { + acceptor.node.handle_update_add_htlc(node_id_initiator, &updates.update_add_htlcs[0]); + do_commitment_signed_dance(acceptor, initiator, &updates.commitment_signed, false, false); + } else { + panic!("Unexpected event {:?}", msg_events[1]); + }; +} + +#[test] +fn fail_splice_on_tx_complete_error() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let config = test_default_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initiator = &nodes[1]; + let acceptor = &nodes[0]; + + let node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000); + + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: acceptor.wallet_source.get_change_script().unwrap(), + }]); + let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution); + + // Queue an outgoing HTLC to the holding cell. It should be freed once we exit quiescence. + let (route, payment_hash, _payment_preimage, payment_secret) = + get_route_and_payment_hash!(initiator, acceptor, 1_000_000); + let onion = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + acceptor.node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + + let tx_add_input = + get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); + acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); + let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); + + // Tamper the shared funding output such that the acceptor fails upon `tx_complete`. + let mut tx_add_output = + get_event_msg!(initiator, MessageSendEvent::SendTxAddOutput, node_id_acceptor); + assert!(tx_add_output.script.is_p2wsh()); + tx_add_output.sats *= 2; + acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); + let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); + + let tx_add_output = + get_event_msg!(initiator, MessageSendEvent::SendTxAddOutput, node_id_acceptor); + acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); + let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); + + let _ = get_event!(initiator, Event::FundingTransactionReadyForSigning); + let tx_complete = get_event_msg!(initiator, MessageSendEvent::SendTxComplete, node_id_acceptor); + acceptor.node.handle_tx_complete(node_id_initiator, &tx_complete); + + let msg_events = acceptor.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + check_added_monitors(acceptor, 1); + let tx_abort = if let MessageSendEvent::SendTxAbort { msg, .. } = &msg_events[0] { + msg + } else { + panic!("Unexpected event {:?}", msg_events[0]); + }; + let update = if let MessageSendEvent::UpdateHTLCs { updates, .. 
} = &msg_events[1] { + updates + } else { + panic!("Unexpected event {:?}", msg_events[1]); + }; + + initiator.node.handle_tx_abort(node_id_acceptor, tx_abort); + let _ = get_event!(initiator, Event::SpliceFailed); let tx_abort = get_event_msg!(initiator, MessageSendEvent::SendTxAbort, node_id_acceptor); acceptor.node.handle_tx_abort(node_id_initiator, &tx_abort); + + initiator.node.handle_update_add_htlc(node_id_acceptor, &update.update_add_htlcs[0]); + do_commitment_signed_dance(initiator, acceptor, &update.commitment_signed, false, false); +} + +#[test] +fn free_holding_cell_on_tx_signatures_quiescence_exit() { + // Test that if there's an update in the holding cell while we're quiescent, that it gets freed + // upon exiting quiescence via the `tx_signatures` exchange. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let config = test_default_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initiator = &nodes[0]; + let acceptor = &nodes[1]; + let node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); + + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: initiator.wallet_source.get_change_script().unwrap(), + }]); + negotiate_splice_tx(initiator, acceptor, channel_id, contribution); + + // Queue an outgoing HTLC to the holding cell. It should be freed once we exit quiescence. + let (route, payment_hash, _payment_preimage, payment_secret) = + get_route_and_payment_hash!(initiator, acceptor, 1_000_000); + let onion = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + initiator.node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); + + let event = get_event!(initiator, Event::FundingTransactionReadyForSigning); + if let Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } = event + { + let partially_signed_tx = initiator.wallet_source.sign_tx(unsigned_transaction).unwrap(); + initiator + .node + .funding_transaction_signed(&channel_id, &counterparty_node_id, partially_signed_tx) + .unwrap(); + } else { + unreachable!(); + } + + let update = get_htlc_update_msgs(initiator, &node_id_acceptor); + acceptor.node.handle_commitment_signed(node_id_initiator, &update.commitment_signed[0]); + check_added_monitors(&acceptor, 1); + + let msg_events = acceptor.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = &msg_events[0] { + let commitment_signed = &updates.commitment_signed[0]; + initiator.node.handle_commitment_signed(node_id_acceptor, commitment_signed); + check_added_monitors(&initiator, 1); + } else { + panic!("Unexpected event {:?}", &msg_events[0]); + } + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[1] { + initiator.node.handle_tx_signatures(node_id_acceptor, msg); + } else { + panic!("Unexpected event {:?}", &msg_events[1]); + } + + // With `tx_signatures` exchanged, we've exited quiescence and should now see the outgoing HTLC + // update be sent. 
+	let msg_events = initiator.node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 2, "{msg_events:?}");
+	check_added_monitors(initiator, 1); // Outgoing HTLC monitor update
+	if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] {
+		acceptor.node.handle_tx_signatures(node_id_initiator, msg);
+	} else {
+		panic!("Unexpected event {:?}", &msg_events[0]);
+	}
+	if let MessageSendEvent::UpdateHTLCs { updates, .. } = &msg_events[1] {
+		acceptor.node.handle_update_add_htlc(node_id_initiator, &updates.update_add_htlcs[0]);
+		do_commitment_signed_dance(acceptor, initiator, &updates.commitment_signed, false, false);
+	} else {
+		panic!("Unexpected event {:?}", &msg_events[1]);
+	}
+
+	expect_splice_pending_event(initiator, &node_id_acceptor);
+	expect_splice_pending_event(acceptor, &node_id_initiator);
 }

 #[test]