diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 488c5ac4826..67e954fc8e5 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -48,6 +48,11 @@ cargo test -p lightning --verbose --color always --features dnssec cargo check -p lightning --verbose --color always --features dnssec cargo doc -p lightning --document-private-items --features dnssec +echo -e "\n\nChecking and testing lightning with safe_channels" +cargo test -p lightning --verbose --color always --features safe_channels +cargo check -p lightning --verbose --color always --features safe_channels +cargo doc -p lightning --document-private-items --features safe_channels + echo -e "\n\nChecking and testing Block Sync Clients with features" cargo test -p lightning-block-sync --verbose --color always --features rest-client diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index b3b597029da..d258d28066c 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -23,6 +23,7 @@ _externalize_tests = ["inventory", "_test_utils"] # Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling). # This is unsafe to use in production because it may result in the counterparty publishing taking our funds. 
unsafe_revoked_tx_signing = [] +safe_channels = [] std = [] diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 515a3dc5f1d..36dd6457df2 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -50,6 +50,8 @@ use crate::ln::chan_utils::{ self, ChannelTransactionParameters, CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCClaim, HTLCOutputInCommitment, HolderCommitmentTransaction, }; +#[cfg(feature = "safe_channels")] +use crate::ln::channel::FundedChannelState; use crate::ln::channel::INITIAL_COMMITMENT_NUMBER; use crate::ln::channel_keys::{ DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, HtlcKey, RevocationBasepoint, @@ -111,8 +113,28 @@ pub struct ChannelMonitorUpdate { /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and /// always `Some` otherwise. pub channel_id: Option<ChannelId>, + + /// The channel state associated with this ChannelMonitorUpdate, if any. + #[cfg(feature = "safe_channels")] + pub channel_state: Option<UpdateChannelState>, +} + +/// The state of a channel to be stored alongside a ChannelMonitor. For closed channels, no state is stored. +#[cfg(feature = "safe_channels")] +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum UpdateChannelState { + /// Open channel in funded state. + Funded(FundedChannelState), + /// Closed channel.
+ Closed, } +#[cfg(feature = "safe_channels")] +impl_writeable_tlv_based_enum!(UpdateChannelState, + (1, Closed) => {}, + {0, Funded} => (), +); + impl ChannelMonitorUpdate { pub(crate) fn internal_renegotiated_funding_data( &self, @@ -156,10 +178,17 @@ impl Writeable for ChannelMonitorUpdate { for update_step in self.updates.iter() { update_step.write(w)?; } + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(w, { // 1 was previously used to store `counterparty_node_id` (3, self.channel_id, option), }); + #[cfg(feature = "safe_channels")] + write_tlv_fields!(w, { + // 1 was previously used to store `counterparty_node_id` + (3, self.channel_id, option), + (5, self.channel_state, option) + }); Ok(()) } } @@ -176,11 +205,24 @@ impl Readable for ChannelMonitorUpdate { } } let mut channel_id = None; + #[cfg(not(feature = "safe_channels"))] read_tlv_fields!(r, { // 1 was previously used to store `counterparty_node_id` (3, channel_id, option), }); - Ok(Self { update_id, updates, channel_id }) + #[cfg(feature = "safe_channels")] + let mut channel_state = None; + #[cfg(feature = "safe_channels")] + read_tlv_fields!(r, { + // 1 was previously used to store `counterparty_node_id` + (3, channel_id, option), + (5, channel_state, option) + }); + Ok(Self { + update_id, updates, channel_id, + #[cfg(feature = "safe_channels")] + channel_state + }) } } @@ -1402,6 +1444,11 @@ pub(crate) struct ChannelMonitorImpl { /// make deciding whether to do so simple, here we track whether this monitor was last written /// prior to 0.1. written_by_0_1_or_later: bool, + + /// The channel state as provided via the last `ChannelMonitorUpdate` or via a call to + /// [`ChannelMonitor::update_channel_state`]. 
+ #[cfg(feature = "safe_channels")] + channel_state: Option, } // Returns a `&FundingScope` for the one we are currently observing/handling commitment transactions @@ -1521,6 +1568,12 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; pub(crate) fn write_chanmon_internal( channel_monitor: &ChannelMonitorImpl, _is_stub: bool, writer: &mut W, ) -> Result<(), Error> { + // Check that the encoded channel (if present) is consistent with the rest of the monitor. This sets an invariant + // for the safe_channels feature. + #[cfg(feature = "safe_channels")] + if let Some(UpdateChannelState::Funded(ref channel_state)) = channel_monitor.channel_state { + channel_monitor.check_channel_state_consistency(channel_state); + } write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); channel_monitor.latest_update_id.write(writer)?; @@ -1733,6 +1786,7 @@ pub(crate) fn write_chanmon_internal( _ => channel_monitor.pending_monitor_events.clone(), }; + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(writer, { (1, channel_monitor.funding_spend_confirmed, option), (3, channel_monitor.htlcs_resolved_on_chain, required_vec), @@ -1757,6 +1811,32 @@ pub(crate) fn write_chanmon_internal( (37, channel_monitor.funding_seen_onchain, required), }); + #[cfg(feature = "safe_channels")] + write_tlv_fields!(writer, { + (1, channel_monitor.funding_spend_confirmed, option), + (3, channel_monitor.htlcs_resolved_on_chain, required_vec), + (5, pending_monitor_events, required_vec), + (7, channel_monitor.funding_spend_seen, required), + (9, channel_monitor.counterparty_node_id, required), + (11, channel_monitor.confirmed_commitment_tx_counterparty_output, option), + (13, channel_monitor.spendable_txids_confirmed, required_vec), + (15, channel_monitor.counterparty_fulfilled_htlcs, required), + (17, channel_monitor.initial_counterparty_commitment_info, option), + (19, channel_monitor.channel_id, required), + (21, channel_monitor.balances_empty_height, option), + (23, 
channel_monitor.holder_pays_commitment_tx_fee, option), + (25, channel_monitor.payment_preimages, required), + (27, channel_monitor.first_negotiated_funding_txo, required), + (29, channel_monitor.initial_counterparty_commitment_tx, option), + (31, channel_monitor.funding.channel_parameters, required), + (32, channel_monitor.pending_funding, optional_vec), + (33, channel_monitor.htlcs_resolved_to_user, required), + (34, channel_monitor.alternative_funding_confirmed, option), + (35, channel_monitor.is_manual_broadcast, required), + (37, channel_monitor.funding_seen_onchain, required), + (39, channel_monitor.channel_state, option), + }); + Ok(()) } @@ -1994,6 +2074,8 @@ impl ChannelMonitor { alternative_funding_confirmed: None, written_by_0_1_or_later: true, + #[cfg(feature = "safe_channels")] + channel_state: None, }) } @@ -2114,6 +2196,19 @@ impl ChannelMonitor { inner.update_monitor(updates, broadcaster, fee_estimator, &logger) } + /// Gets the encoded channel data, if any, associated with this ChannelMonitor. + #[cfg(feature = "safe_channels")] + pub fn get_channel_state(&self) -> Option { + self.inner.lock().unwrap().channel_state.clone() + } + + /// Updates the encoded channel data associated with this ChannelMonitor. To clear the encoded channel data (for + /// example after shut down of a channel), pass `None`. + #[cfg(feature = "safe_channels")] + pub fn update_channel_state(&self, channel_state: UpdateChannelState) { + self.inner.lock().unwrap().update_channel_state(channel_state); + } + /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this /// ChannelMonitor. 
/// @@ -2719,6 +2814,48 @@ impl ChannelMonitor { } impl ChannelMonitorImpl { + #[cfg(feature = "safe_channels")] + fn check_channel_state_consistency(&self, encoded: &FundedChannelState) { + debug_assert!( + encoded.get_cur_holder_commitment_transaction_number() + <= self.get_cur_holder_commitment_number(), + "cur_holder_commitment_transaction_number - channel: {} vs monitor: {}", + encoded.get_cur_holder_commitment_transaction_number(), + self.get_cur_holder_commitment_number() + ); + debug_assert!( + encoded.get_revoked_counterparty_commitment_transaction_number() + <= self.get_min_seen_secret(), + "revoked_counterparty_commitment_transaction_number - channel: {} vs monitor: {}", + encoded.get_revoked_counterparty_commitment_transaction_number(), + self.get_min_seen_secret() + ); + debug_assert!( + encoded.get_cur_counterparty_commitment_transaction_number() + <= self.get_cur_counterparty_commitment_number(), + "cur_counterparty_commitment_transaction_number - channel: {} vs monitor: {}", + encoded.get_cur_counterparty_commitment_transaction_number(), + self.get_cur_counterparty_commitment_number() + ); + debug_assert!( + encoded.latest_monitor_update_id >= self.get_latest_update_id(), + "latest_monitor_update_id - channel: {} vs monitor: {}", + encoded.latest_monitor_update_id, + self.get_latest_update_id() + ); + } + + #[cfg(feature = "safe_channels")] + fn update_channel_state(&mut self, encoded: UpdateChannelState) { + if let UpdateChannelState::Funded(ref channel) = encoded { + // Check that the encoded channel is consistent with the rest of the monitor. This sets an invariant for the + // safe_channels feature. + self.check_channel_state_consistency(channel); + } + + self.channel_state = Some(encoded); + } + /// Helper for get_claimable_balances which does the work for an individual HTLC, generating up /// to one `Balance` for the HTLC. 
#[rustfmt::skip] @@ -4405,6 +4542,13 @@ impl ChannelMonitorImpl { } } + // Assume that if the update contains no encoded channel, that the channel remained unchanged. We + // therefore do not update the monitor. + #[cfg(feature="safe_channels")] + if let Some(channel_state) = updates.channel_state.as_ref() { + self.update_channel_state(channel_state.clone()); + } + if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); Err(()) @@ -6644,6 +6788,33 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut alternative_funding_confirmed = None; let mut is_manual_broadcast = RequiredWrapper(None); let mut funding_seen_onchain = RequiredWrapper(None); + #[cfg(not(feature="safe_channels"))] + read_tlv_fields!(reader, { + (1, funding_spend_confirmed, option), + (3, htlcs_resolved_on_chain, optional_vec), + (5, pending_monitor_events, optional_vec), + (7, funding_spend_seen, option), + (9, counterparty_node_id, option), + (11, confirmed_commitment_tx_counterparty_output, option), + (13, spendable_txids_confirmed, optional_vec), + (15, counterparty_fulfilled_htlcs, option), + (17, initial_counterparty_commitment_info, option), + (19, channel_id, option), + (21, balances_empty_height, option), + (23, holder_pays_commitment_tx_fee, option), + (25, payment_preimages_with_info, option), + (27, first_negotiated_funding_txo, (default_value, outpoint)), + (29, initial_counterparty_commitment_tx, option), + (31, channel_parameters, (option: ReadableArgs, None)), + (32, pending_funding, optional_vec), + (33, htlcs_resolved_to_user, option), + (34, alternative_funding_confirmed, option), + (35, is_manual_broadcast, (default_value, false)), + (37, funding_seen_onchain, (default_value, true)), + }); + #[cfg(feature="safe_channels")] + let mut channel_state = None; + #[cfg(feature="safe_channels")] 
read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -6666,6 +6837,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (34, alternative_funding_confirmed, option), (35, is_manual_broadcast, (default_value, false)), (37, funding_seen_onchain, (default_value, true)), + (39, channel_state, option), }); // Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so // we can use it to determine if this monitor was last written by LDK 0.1 or later. @@ -6843,6 +7015,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP alternative_funding_confirmed, written_by_0_1_or_later, + #[cfg(feature="safe_channels")] + channel_state, }); if counterparty_node_id.is_none() { diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 431fdd2859c..3a39145d320 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -400,8 +400,7 @@ pub fn build_closing_transaction(to_holder_value_sat: Amount, to_counterparty_va /// /// Allows us to keep track of all of the revocation secrets of our counterparty in just 50*32 bytes /// or so. 
-#[derive(Clone)] -#[cfg_attr(test, derive(Debug))] +#[derive(Clone, Debug)] pub struct CounterpartyCommitmentSecrets { old_secrets: [([u8; 32], u64); 49], } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 128091ccdd5..c3b643a596b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -30,6 +30,8 @@ use crate::blinded_path::message::BlindedMessagePath; use crate::chain::chaininterface::{ fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, }; +#[cfg(feature = "safe_channels")] +use crate::chain::channelmonitor::UpdateChannelState; use crate::chain::channelmonitor::{ ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, CommitmentHTLCData, LATENCY_GRACE_PERIOD_BLOCKS, @@ -123,7 +125,7 @@ pub struct AvailableBalances { pub next_outbound_htlc_minimum_msat: u64, } -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] enum FeeUpdateState { // Inbound states mirroring InboundHTLCState RemoteAnnounced, @@ -138,16 +140,33 @@ enum FeeUpdateState { Outbound, } -#[derive(Debug)] +impl_writeable_tlv_based_enum!(FeeUpdateState, + (0, RemoteAnnounced) => {}, + (1, AwaitingRemoteRevokeToAnnounce) => {}, + (2, Outbound) => {}, +); + +#[derive(Debug, Clone, PartialEq, Eq)] enum InboundHTLCRemovalReason { FailRelay(msgs::OnionErrorPacket), FailMalformed { sha256_of_onion: [u8; 32], failure_code: u16 }, Fulfill { preimage: PaymentPreimage, attribution_data: Option }, } +impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason, + (1, FailMalformed) => { + (0, sha256_of_onion, required), + (1, failure_code, required), + }, + (2, Fulfill) => { + (0, preimage, required), + (1, attribution_data, required), + }, + {0, FailRelay} => (), +); + /// Represents the resolution status of an inbound HTLC. 
-#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Eq)] enum InboundHTLCResolution { /// Resolved implies the action we must take with the inbound HTLC has already been determined, /// i.e., we already know whether it must be failed back or forwarded. @@ -170,7 +189,7 @@ impl_writeable_tlv_based_enum!(InboundHTLCResolution, }, ); -#[cfg_attr(test, derive(Debug))] +#[derive(Clone, Debug, PartialEq, Eq)] enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. @@ -232,6 +251,16 @@ enum InboundHTLCState { LocalRemoved(InboundHTLCRemovalReason), } +impl_writeable_tlv_based_enum!(InboundHTLCState, + (3, Committed) => { + (0, update_add_htlc_opt, required), + }, // Strangely this one needs to come first?!? + {0, RemoteAnnounced} => (), + {1, AwaitingRemoteRevokeToAnnounce} => (), + {2, AwaitingAnnouncedRemoteRevoke} => (), + {4, LocalRemoved} => (), +); + impl From<&InboundHTLCState> for Option { fn from(state: &InboundHTLCState) -> Option { match state { @@ -308,7 +337,7 @@ impl InboundHTLCState { } } -#[cfg_attr(test, derive(Debug))] +#[derive(Clone, Debug, PartialEq, Eq)] struct InboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -317,8 +346,15 @@ struct InboundHTLCOutput { state: InboundHTLCState, } -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] +impl_writeable_tlv_based!(InboundHTLCOutput, { + (0, htlc_id, required), + (1, amount_msat, required), + (2, cltv_expiry, required), + (3, payment_hash, required), + (4, state, required), +}); + +#[derive(Debug, Clone, PartialEq, Eq)] enum OutboundHTLCState { /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). 
When they next revoke_and_ack @@ -351,6 +387,14 @@ enum OutboundHTLCState { AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome), } +impl_writeable_tlv_based_enum!(OutboundHTLCState, + (3, Committed) => {}, // Strangely this one needs to come first?!? + {0, LocalAnnounced} => (), + {1, RemoteRemoved} => (), + {2, AwaitingRemoteRevokeToRemove} => (), + {4, AwaitingRemovedRemoteRevoke} => (), +); + impl From<&OutboundHTLCState> for OutboundHTLCStateDetails { fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails { match state { @@ -417,8 +461,7 @@ impl OutboundHTLCState { } } -#[derive(Clone, Debug)] -#[cfg_attr(test, derive(PartialEq))] +#[derive(Clone, Debug, PartialEq, Eq)] enum OutboundHTLCOutcome { /// We started always filling in the preimages here in 0.0.105, and the requirement /// that the preimages always be filled in was added in 0.2. @@ -429,6 +472,14 @@ enum OutboundHTLCOutcome { Failure(HTLCFailReason), } +impl_writeable_tlv_based_enum!(OutboundHTLCOutcome, + (0, Success) => { + (0, preimage, required), + (1, attribution_data, required), + }, + {1, Failure} => (), +); + impl<'a> Into> for &'a OutboundHTLCOutcome { fn into(self) -> Option<&'a HTLCFailReason> { match self { @@ -438,8 +489,7 @@ impl<'a> Into> for &'a OutboundHTLCOutcome { } } -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] +#[derive(Debug, Clone, PartialEq, Eq)] struct OutboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -453,9 +503,21 @@ struct OutboundHTLCOutput { hold_htlc: Option<()>, } +impl_writeable_tlv_based!(OutboundHTLCOutput, { + (0, htlc_id, required), + (1, amount_msat, required), + (2, cltv_expiry, required), + (3, payment_hash, required), + (4, state, required), + (5, source, required), + (6, blinding_point, required), + (7, skimmed_fee_msat, required), + (8, send_timestamp, required), + (9, hold_htlc, required), +}); + /// See AwaitingRemoteRevoke ChannelState for more info -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] +#[derive(Debug, 
Clone, PartialEq, Eq)] enum HTLCUpdateAwaitingACK { AddHTLC { // TODO: Time out if we're getting close to cltv_expiry @@ -486,6 +548,33 @@ enum HTLCUpdateAwaitingACK { }, } +impl_writeable_tlv_based_enum!(HTLCUpdateAwaitingACK, + (0, AddHTLC) => { + (0, amount_msat, required), + (1, cltv_expiry, required), + (2, payment_hash, required), + (3, source, required), + (4, onion_routing_packet, required), + (5, skimmed_fee_msat, required), + (6, blinding_point, required), + (7, hold_htlc, required), + }, + (1, ClaimHTLC) => { + (0, payment_preimage, required), + (1, attribution_data, required), + (2, htlc_id, required), + }, + (2, FailHTLC) => { + (0, htlc_id, required), + (1, err_packet, required), + }, + (3, FailMalformedHTLC) => { + (0, htlc_id, required), + (1, failure_code, required), + (2, sha256_of_onion, required), + } +); + macro_rules! define_state_flags { ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),*], $extra_flags: expr) => { #[doc = $flag_type_doc] @@ -725,6 +814,19 @@ enum ChannelState { ShutdownComplete, } +impl Writeable for ChannelState { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + self.to_u32().write(w) + } +} + +impl Readable for ChannelState { + fn read(r: &mut R) -> Result { + let state_u32 = u32::read(r)?; + ChannelState::from_u32(state_u32).map_err(|_| DecodeError::InvalidValue) + } +} + macro_rules! impl_state_flag { ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => { #[allow(unused)] @@ -1038,7 +1140,7 @@ macro_rules! secp_check { /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to /// our channel_update message and track the current state here. /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`]. 
-#[derive(Clone, Copy, PartialEq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub(super) enum ChannelUpdateStatus { /// We've announced the channel as enabled and are connected to our peer. Enabled, @@ -1051,8 +1153,7 @@ pub(super) enum ChannelUpdateStatus { } /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here. -#[cfg_attr(test, derive(Debug))] -#[derive(PartialEq)] +#[derive(PartialEq, Clone, Debug, Eq)] pub enum AnnouncementSigsState { /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since /// we sent the last `AnnouncementSignatures`. @@ -1232,7 +1333,7 @@ pub(crate) struct DisconnectResult { /// Tracks the transaction number, along with current and next commitment points. /// This consolidates the logic to advance our commitment number and request new /// commitment points from our signer. -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] struct HolderCommitmentPoint { next_transaction_number: u64, current_point: Option, @@ -1247,6 +1348,15 @@ struct HolderCommitmentPoint { last_revoked_point: Option, } +impl_writeable_tlv_based!(HolderCommitmentPoint, { + (0, next_transaction_number, required), + (1, current_point, required), + (2, next_point, required), + (3, pending_next_point, required), + (4, previous_revoked_point, required), + (5, last_revoked_point, required), +}); + impl HolderCommitmentPoint { #[rustfmt::skip] pub fn new(signer: &ChannelSignerType, secp_ctx: &Secp256k1) -> Option @@ -1440,7 +1550,7 @@ pub(crate) const CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY: u32 = 14 * 24 * 6 * 4; #[cfg(test)] pub(crate) const CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY: u32 = 144; -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] struct PendingChannelMonitorUpdate { update: ChannelMonitorUpdate, } @@ -2391,6 +2501,53 @@ pub(super) struct FundingScope { minimum_depth_override: Option, } +impl Eq for FundingScope {} + +impl PartialEq for 
FundingScope { + fn eq(&self, other: &Self) -> bool { + self.value_to_self_msat == other.value_to_self_msat + && self.counterparty_selected_channel_reserve_satoshis + == other.counterparty_selected_channel_reserve_satoshis + && self.holder_selected_channel_reserve_satoshis + == other.holder_selected_channel_reserve_satoshis + && self.channel_transaction_parameters == other.channel_transaction_parameters + && self.funding_transaction == other.funding_transaction + && self.funding_tx_confirmed_in == other.funding_tx_confirmed_in + && self.funding_tx_confirmation_height == other.funding_tx_confirmation_height + && self.short_channel_id == other.short_channel_id + && self.minimum_depth_override == other.minimum_depth_override + } +} + +impl Clone for FundingScope { + fn clone(&self) -> Self { + FundingScope { + value_to_self_msat: self.value_to_self_msat, + counterparty_selected_channel_reserve_satoshis: self + .counterparty_selected_channel_reserve_satoshis, + holder_selected_channel_reserve_satoshis: self.holder_selected_channel_reserve_satoshis, + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new( + *self.holder_max_commitment_tx_output.lock().unwrap(), + ), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new( + *self.counterparty_max_commitment_tx_output.lock().unwrap(), + ), + #[cfg(any(test, fuzzing))] + next_local_fee: Mutex::new(*self.next_local_fee.lock().unwrap()), + #[cfg(any(test, fuzzing))] + next_remote_fee: Mutex::new(*self.next_remote_fee.lock().unwrap()), + channel_transaction_parameters: self.channel_transaction_parameters.clone(), + funding_transaction: self.funding_transaction.clone(), + funding_tx_confirmed_in: self.funding_tx_confirmed_in, + funding_tx_confirmation_height: self.funding_tx_confirmation_height, + short_channel_id: self.short_channel_id, + minimum_depth_override: self.minimum_depth_override, + } + } +} + impl Writeable for FundingScope { fn write(&self, writer: &mut W) -> Result<(), 
io::Error> { write_tlv_fields!(writer, { @@ -2677,7 +2834,7 @@ impl FundingScope { /// Information about pending attempts at funding a channel. This includes funding currently under /// negotiation and any negotiated attempts waiting enough on-chain confirmations. More than one /// such attempt indicates use of RBF to increase the chances of confirmation. -#[derive(Debug)] +#[derive(Debug, Clone, Eq, PartialEq)] struct PendingFunding { funding_negotiation: Option, @@ -2699,7 +2856,7 @@ impl_writeable_tlv_based!(PendingFunding, { (7, received_funding_txid, option), }); -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] enum FundingNegotiation { AwaitingAck { context: FundingNegotiationContext, @@ -2779,7 +2936,7 @@ impl PendingFunding { } } -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct SpliceInstructions { adjusted_funding_contribution: SignedAmount, our_funding_inputs: Vec, @@ -2807,7 +2964,7 @@ impl_writeable_tlv_based!(SpliceInstructions, { (11, locktime, required), }); -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) enum QuiescentAction { Splice(SpliceInstructions), #[cfg(any(test, fuzzing))] @@ -6116,6 +6273,8 @@ where should_broadcast: broadcast, }], channel_id: Some(self.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Closed), }; Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), update)) } else { @@ -6624,7 +6783,7 @@ fn check_v2_funding_inputs_sufficient( } /// Context for negotiating channels (dual-funded V2 open, splicing) -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(super) struct FundingNegotiationContext { /// Whether we initiated the funding negotiation. 
pub is_initiator: bool, @@ -6783,6 +6942,313 @@ where quiescent_action: Option, } +#[cfg(feature = "safe_channels")] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct FundedChannelState { + funding: FundingScope, + config: LegacyChannelConfig, + user_id: u128, + channel_id: ChannelId, + temporary_channel_id: Option, + channel_state: ChannelState, + announcement_sigs_state_sent: bool, + pub(crate) latest_monitor_update_id: u64, + shutdown_scriptpubkey: Option, + destination_script: ScriptBuf, + counterparty_next_commitment_transaction_number: u64, + pending_inbound_htlcs: Vec, + pending_outbound_htlcs: Vec, + holding_cell_htlc_updates: Vec, + resend_order: RAACommitmentOrder, + monitor_pending_channel_ready: bool, + monitor_pending_revoke_and_ack: bool, + monitor_pending_commitment_signed: bool, + monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, + monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + monitor_pending_finalized_fulfills: Vec<(HTLCSource, Option)>, + monitor_pending_update_adds: Vec, + pending_update_fee: Option<(u32, FeeUpdateState)>, + holding_cell_update_fee: Option, + next_holder_htlc_id: u64, + next_counterparty_htlc_id: u64, + feerate_per_kw: u32, + update_time_counter: u32, + target_closing_feerate_sats_per_kw: Option, + channel_creation_height: u32, + counterparty_dust_limit_satoshis: u64, + holder_dust_limit_satoshis: u64, + counterparty_max_htlc_value_in_flight_msat: u64, + holder_max_htlc_value_in_flight_msat: u64, + counterparty_htlc_minimum_msat: u64, + holder_htlc_minimum_msat: u64, + counterparty_max_accepted_htlcs: u16, + holder_max_accepted_htlcs: u16, + minimum_depth: Option, + counterparty_forwarding_info: Option, + is_manual_broadcast: bool, + is_batch_funding: Option<()>, + counterparty_next_commitment_point: Option, + counterparty_current_commitment_point: Option, + counterparty_node_id: PublicKey, + counterparty_shutdown_scriptpubkey: Option, + commitment_secrets: CounterpartyCommitmentSecrets, + 
channel_update_status_enabled: bool, + announcement_sigs: Option<(Signature, Signature)>, + latest_inbound_scid_alias: Option, + outbound_scid_alias: u64, + historical_scids: Vec, + channel_pending_event_emitted: bool, + funding_tx_broadcast_safe_event_emitted: bool, + initial_channel_ready_event_emitted: bool, + local_initiated_shutdown: Option<()>, + channel_keys_id: [u8; 32], + blocked_monitor_updates: Vec>, + interactive_tx_signing_session: Option, + holder_commitment_point: HolderCommitmentPoint, + pending_splice: Option, + quiescent_action: Option, +} + +#[cfg(feature = "safe_channels")] +impl FundedChannelState { + pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { + self.holder_commitment_point.current_transaction_number() + } + + pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 { + self.counterparty_next_commitment_transaction_number + 1 + - if self.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 } + } + + pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 { + let ret = self.counterparty_next_commitment_transaction_number + 2; + debug_assert_eq!(self.commitment_secrets.get_min_seen_secret(), ret); + ret + } +} + +#[cfg(feature = "safe_channels")] +impl From<&mut FundedChannel> for FundedChannelState +where + SP::Target: SignerProvider, +{ + fn from(channel: &mut FundedChannel) -> Self { + // Replicate pre-serialization channel state change from original write method. + let mut channel_state = channel.context.channel_state; + match channel_state { + ChannelState::AwaitingChannelReady(_) => {}, + ChannelState::ChannelReady(_) => { + if channel.quiescent_action.is_some() { + // If we're trying to get quiescent to do something, try again when we + // reconnect to the peer. 
+ channel_state.set_awaiting_quiescence(); + } + channel_state.clear_local_stfu_sent(); + channel_state.clear_remote_stfu_sent(); + if channel.should_reset_pending_splice_state(false) + || !channel.has_pending_splice_awaiting_signatures() + { + // We shouldn't be quiescent anymore upon reconnecting if: + // - We were in quiescence but a splice/RBF was never negotiated or + // - We were in quiescence but the splice negotiation failed due to + // disconnecting + channel_state.clear_quiescent(); + } + }, + ChannelState::FundingNegotiated(_) + if channel.context.interactive_tx_signing_session.is_some() => {}, + _ => debug_assert!(false, "Pre-funded/shutdown channels should not be written"), + } + channel_state.set_peer_disconnected(); + + // Replicate pre-serialization pending splice state change from original write method. Serialize the result + // already now because PendingFunding is difficult to clone. + // + // We don't have to worry about resetting the pending `FundingNegotiation` because we can only read + // `FundingNegotiation::AwaitingSignatures` variants anyway. + let pending_splice = channel + .pending_splice + .as_ref() + .filter(|_| !channel.should_reset_pending_splice_state(false)) + .cloned(); + + // Prevent recursive serialization compiler errors by storing the serialized updates. + let serialized_blocked_monitor_updates = + channel.context.blocked_monitor_updates.iter().map(|update| update.encode()).collect(); + + // We only care about writing out the current state as if we had just disconnected, at + // which point we always set anything but AnnouncementSigsReceived to NotSent. 
+ let announcement_sigs_state_sent = match channel.context.announcement_sigs_state { + AnnouncementSigsState::NotSent => false, + AnnouncementSigsState::MessageSent => false, + AnnouncementSigsState::Committed => false, + AnnouncementSigsState::PeerReceived => true, + }; + + // We only care about writing out the current state as it was announced, ie only either Enabled or Disabled. In + // the case of DisabledStaged, we most recently announced the channel as enabled. + let channel_update_status_enabled = match channel.context.channel_update_status { + ChannelUpdateStatus::Enabled => true, + ChannelUpdateStatus::DisabledStaged(_) => true, + ChannelUpdateStatus::EnabledStaged(_) => false, + ChannelUpdateStatus::Disabled => false, + }; + + // Mirror existing [`LegacyChannelConfig`] behavior by resetting this flag. + let mut config = channel.context.config; + config.options.accept_underpaying_htlcs = false; + + FundedChannelState { + funding: channel.funding.clone(), + user_id: channel.context.user_id, + channel_id: channel.context.channel_id, + channel_state, + latest_monitor_update_id: channel.context.latest_monitor_update_id, + shutdown_scriptpubkey: channel.context.shutdown_scriptpubkey.clone(), + destination_script: channel.context.destination_script.clone(), + counterparty_next_commitment_transaction_number: channel + .context + .counterparty_next_commitment_transaction_number, + interactive_tx_signing_session: channel.context.interactive_tx_signing_session.clone(), + pending_inbound_htlcs: channel.context.pending_inbound_htlcs.clone(), + config, + temporary_channel_id: channel.context.temporary_channel_id, + announcement_sigs_state_sent, + pending_outbound_htlcs: channel.context.pending_outbound_htlcs.clone(), + holding_cell_htlc_updates: channel.context.holding_cell_htlc_updates.clone(), + resend_order: channel.context.resend_order.clone(), + monitor_pending_channel_ready: channel.context.monitor_pending_channel_ready, + monitor_pending_revoke_and_ack: 
channel.context.monitor_pending_revoke_and_ack, + monitor_pending_commitment_signed: channel.context.monitor_pending_commitment_signed, + monitor_pending_forwards: channel.context.monitor_pending_forwards.clone(), + monitor_pending_failures: channel.context.monitor_pending_failures.clone(), + monitor_pending_finalized_fulfills: channel + .context + .monitor_pending_finalized_fulfills + .clone(), + monitor_pending_update_adds: channel.context.monitor_pending_update_adds.clone(), + pending_update_fee: channel.context.pending_update_fee, + holding_cell_update_fee: channel.context.holding_cell_update_fee, + next_holder_htlc_id: channel.context.next_holder_htlc_id, + next_counterparty_htlc_id: channel.context.next_counterparty_htlc_id, + feerate_per_kw: channel.context.feerate_per_kw, + update_time_counter: channel.context.update_time_counter, + target_closing_feerate_sats_per_kw: channel.context.target_closing_feerate_sats_per_kw, + channel_creation_height: channel.context.channel_creation_height, + counterparty_dust_limit_satoshis: channel.context.counterparty_dust_limit_satoshis, + holder_dust_limit_satoshis: channel.context.holder_dust_limit_satoshis, + counterparty_max_htlc_value_in_flight_msat: channel + .context + .counterparty_max_htlc_value_in_flight_msat, + holder_max_htlc_value_in_flight_msat: channel + .context + .holder_max_htlc_value_in_flight_msat, + counterparty_htlc_minimum_msat: channel.context.counterparty_htlc_minimum_msat, + holder_htlc_minimum_msat: channel.context.holder_htlc_minimum_msat, + counterparty_max_accepted_htlcs: channel.context.counterparty_max_accepted_htlcs, + holder_max_accepted_htlcs: channel.context.holder_max_accepted_htlcs, + minimum_depth: channel.context.minimum_depth, + counterparty_forwarding_info: channel.context.counterparty_forwarding_info.clone(), + is_manual_broadcast: channel.context.is_manual_broadcast, + is_batch_funding: channel.context.is_batch_funding, + counterparty_next_commitment_point: 
channel.context.counterparty_next_commitment_point, + counterparty_current_commitment_point: channel + .context + .counterparty_current_commitment_point, + counterparty_node_id: channel.context.counterparty_node_id, + counterparty_shutdown_scriptpubkey: channel + .context + .counterparty_shutdown_scriptpubkey + .clone(), + commitment_secrets: channel.context.commitment_secrets.clone(), + channel_update_status_enabled, + announcement_sigs: channel.context.announcement_sigs, + latest_inbound_scid_alias: channel.context.latest_inbound_scid_alias, + outbound_scid_alias: channel.context.outbound_scid_alias, + historical_scids: channel.context.historical_scids.clone(), + channel_pending_event_emitted: channel.context.channel_pending_event_emitted, + funding_tx_broadcast_safe_event_emitted: channel + .context + .funding_tx_broadcast_safe_event_emitted, + initial_channel_ready_event_emitted: channel + .context + .initial_channel_ready_event_emitted, + local_initiated_shutdown: channel.context.local_initiated_shutdown, + channel_keys_id: channel.context.channel_keys_id, + blocked_monitor_updates: serialized_blocked_monitor_updates, + holder_commitment_point: channel.holder_commitment_point, + pending_splice, + quiescent_action: channel.quiescent_action.clone(), + } + } +} + +#[cfg(feature = "safe_channels")] +impl_writeable_tlv_based!(FundedChannelState, { + (0, funding, required), + (1, config, required), + (2, user_id, required), + (3, channel_id, required), + (4, temporary_channel_id, required), + (5, channel_state, required), + (6, announcement_sigs_state_sent, required), + (7, latest_monitor_update_id, required), + (8, shutdown_scriptpubkey, required), + (9, destination_script, required), + (10, counterparty_next_commitment_transaction_number, required), + (11, pending_inbound_htlcs, required_vec), + (12, pending_outbound_htlcs, required_vec), + (13, holding_cell_htlc_updates, required_vec), + (14, resend_order, required), + (15, monitor_pending_channel_ready, 
required), + (16, monitor_pending_revoke_and_ack, required), + (17, monitor_pending_commitment_signed, required), + (18, monitor_pending_forwards, required), + (19, monitor_pending_failures, required_vec), + (20, monitor_pending_finalized_fulfills, required), + (21, monitor_pending_update_adds, required), + (22, pending_update_fee, required), + (23, holding_cell_update_fee, required), + (24, next_holder_htlc_id, required), + (25, next_counterparty_htlc_id, required), + (26, feerate_per_kw, required), + (27, update_time_counter, required), + (28, target_closing_feerate_sats_per_kw, required), + (29, channel_creation_height, required), + (30, counterparty_dust_limit_satoshis, required), + (31, holder_dust_limit_satoshis, required), + (32, counterparty_max_htlc_value_in_flight_msat, required), + (33, holder_max_htlc_value_in_flight_msat, required), + (34, counterparty_htlc_minimum_msat, required), + (35, holder_htlc_minimum_msat, required), + (36, counterparty_max_accepted_htlcs, required), + (37, holder_max_accepted_htlcs, required), + (38, minimum_depth, required), + (39, counterparty_forwarding_info, required), + (40, is_manual_broadcast, required), + (41, is_batch_funding, required), + (42, counterparty_next_commitment_point, required), + (43, counterparty_current_commitment_point, required), + (44, counterparty_node_id, required), + (45, counterparty_shutdown_scriptpubkey, required), + (46, commitment_secrets, required), + (47, channel_update_status_enabled, required), + (48, announcement_sigs, required), + (49, latest_inbound_scid_alias, required), + (50, outbound_scid_alias, required), + (51, historical_scids, required), + (52, channel_pending_event_emitted, required), + (53, funding_tx_broadcast_safe_event_emitted, required), + (54, initial_channel_ready_event_emitted, required), + (55, local_initiated_shutdown, required), + (56, channel_keys_id, required), + (57, blocked_monitor_updates, required_vec), + (58, interactive_tx_signing_session, required), + (59, 
holder_commitment_point, required), + (60, pending_splice, required), + (61, quiescent_action, upgradable_option), +}); + #[cfg(any(test, fuzzing))] #[derive(Clone, Copy, Default, Debug)] struct PredictedNextFee { @@ -7365,6 +7831,8 @@ where payment_info, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; if !self.context.channel_state.can_generate_new_commitment() { @@ -7478,6 +7946,11 @@ where // to be strictly increasing by one, so decrement it here. self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = + Some(UpdateChannelState::Funded(self.into())); + } } else { let blocked_upd = self.context.blocked_monitor_updates.get(0); let new_mon_id = blocked_upd @@ -8001,7 +8474,8 @@ where ); self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { + #[allow(unused_mut)] + let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::RenegotiatedFunding { channel_parameters: pending_splice_funding.channel_transaction_parameters.clone(), @@ -8009,6 +8483,8 @@ where counterparty_commitment_tx, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: None, }; self.context @@ -8018,6 +8494,10 @@ where .received_commitment_signed(); self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8277,6 +8757,8 @@ where update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + 
channel_state: None, }; self.context.expecting_peer_commitment_signed = false; @@ -8301,6 +8783,10 @@ where } log_debug!(logger, "Received valid commitment_signed from peer, updated HTLC state but awaiting a monitor update resolution to reply.", ); + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8329,6 +8815,10 @@ where Vec::new(), Vec::new(), ); + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8381,6 +8871,8 @@ where update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! updates: Vec::new(), channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: None, }; let mut htlc_updates = Vec::new(); @@ -8476,6 +8968,12 @@ where unreachable!() }; update_fulfill_count += 1; + + #[cfg(feature = "safe_channels")] + { + additional_monitor_update.channel_state = + Some(UpdateChannelState::Funded(self.into())); + } monitor_update.updates.append(&mut additional_monitor_update.updates); None }, @@ -8534,6 +9032,11 @@ where update_add_count, update_fulfill_count, update_fail_count); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8650,6 +9153,8 @@ where secret: msg.per_commitment_secret, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: None, }; // Update state now that we've passed all the can-fail calls... @@ -8881,6 +9386,10 @@ where }; macro_rules! 
return_with_htlcs_to_fail { ($htlcs_to_fail: expr) => { + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } if !release_monitor { self.context .blocked_monitor_updates @@ -10534,6 +11043,8 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -10988,10 +11499,13 @@ where if self.context.blocked_monitor_updates.is_empty() { return None; } - Some(( - self.context.blocked_monitor_updates.remove(0).update, - !self.context.blocked_monitor_updates.is_empty(), - )) + #[allow(unused_mut)] + let mut update = self.context.blocked_monitor_updates.remove(0).update; + #[cfg(feature = "safe_channels")] + { + update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } + Some((update, !self.context.blocked_monitor_updates.is_empty())) } /// Pushes a new monitor update into our monitor update queue, returning it if it should be @@ -11291,6 +11805,8 @@ where funding_txid: funding_txo.txid, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); @@ -12882,12 +13398,14 @@ where } self.context.latest_monitor_update_id += 1; + self.context.channel_state.set_awaiting_remote_revoke(); let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; - 
self.context.channel_state.set_awaiting_remote_revoke(); monitor_update } @@ -13132,6 +13650,8 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs index d10327b259a..dc892992cfe 100644 --- a/lightning/src/ln/channel_state.rs +++ b/lightning/src/ln/channel_state.rs @@ -213,7 +213,7 @@ impl_writeable_tlv_based!(OutboundHTLCDetails, { }); /// Information needed for constructing an invoice route hint for this channel. -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct CounterpartyForwardingInfo { /// Base routing fee in millisatoshis. pub fee_base_msat: u32, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f2e8fa70e4f..fb60445e412 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -41,6 +41,8 @@ use crate::chain; use crate::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, }; +#[cfg(feature = "safe_channels")] +use crate::chain::channelmonitor::UpdateChannelState; use crate::chain::channelmonitor::{ Balance, ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, WithChannelMonitor, ANTI_REORG_DELAY, CLTV_CLAIM_BUFFER, HTLC_FAIL_BACK_BUFFER, @@ -207,8 +209,7 @@ use crate::ln::script::ShutdownScript; // our payment, which we can use to decode errors or inform the user that the payment was sent. /// Information about where a received HTLC('s onion) has indicated the HTLC should go. 
-#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug -#[cfg_attr(test, derive(Debug, PartialEq))] +#[derive(Clone, Debug, PartialEq, Eq)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub enum PendingHTLCRouting { /// An HTLC which should be forwarded on to another node. Forward { @@ -386,8 +387,7 @@ impl PendingHTLCRouting { /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it /// should go next. -#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug -#[cfg_attr(test, derive(Debug, PartialEq))] +#[derive(Clone, Debug, PartialEq, Eq)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub struct PendingHTLCInfo { /// Further routing details based on whether the HTLC is being forwarded or received. pub routing: PendingHTLCRouting, @@ -429,15 +429,14 @@ pub struct PendingHTLCInfo { pub skimmed_fee_msat: Option, } -#[derive(Clone, Debug)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug +#[derive(Clone, Debug, PartialEq, Eq)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub(super) enum HTLCFailureMsg { Relay(msgs::UpdateFailHTLC), Malformed(msgs::UpdateFailMalformedHTLC), } /// Stores whether we can't forward an HTLC or relevant forwarding info -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug +#[derive(Clone, Debug, PartialEq, Eq)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub(super) enum PendingHTLCStatus { Forward(PendingHTLCInfo), Fail(HTLCFailureMsg), @@ -1009,7 +1008,7 @@ impl MsgHandleErrInternal { /// be sent in the order they appear in the return value, however sometimes the order needs to be /// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order /// they were originally sent). In those cases, this enum is also returned. 
-#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub(super) enum RAACommitmentOrder { /// Send the CommitmentUpdate messages first CommitmentFirst, @@ -1017,6 +1016,11 @@ pub(super) enum RAACommitmentOrder { RevokeAndACKFirst, } +impl_writeable_tlv_based_enum!(RAACommitmentOrder, + (0, CommitmentFirst) => {}, + (1, RevokeAndACKFirst) => {}, +); + /// Similar to scenarios used by [`RAACommitmentOrder`], this determines whether a `channel_ready` /// message should be sent first (i.e., prior to a `commitment_update`) or after the initial /// `commitment_update` and `tx_signatures` for channel funding. @@ -3370,42 +3374,10 @@ macro_rules! handle_monitor_update_completion { }}; } -/// Returns whether the monitor update is completed, `false` if the update is in-progress. -fn handle_monitor_update_res( - cm: &CM, update_res: ChannelMonitorUpdateStatus, logger: LG, -) -> bool { - debug_assert!(cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire)); - match update_res { - ChannelMonitorUpdateStatus::UnrecoverableError => { - let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; - log_error!(logger, "{}", err_str); - panic!("{}", err_str); - }, - ChannelMonitorUpdateStatus::InProgress => { - #[cfg(not(any(test, feature = "_externalize_tests")))] - if cm.get_cm().monitor_update_type.swap(1, Ordering::Relaxed) == 2 { - panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); - } - log_debug!( - logger, - "ChannelMonitor update in flight, holding messages until the update completes.", - ); - false - }, - ChannelMonitorUpdateStatus::Completed => { - #[cfg(not(any(test, feature = "_externalize_tests")))] - if cm.get_cm().monitor_update_type.swap(2, Ordering::Relaxed) == 1 { - panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); - } - true - }, - } -} - macro_rules! handle_initial_monitor { ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let update_completed = handle_monitor_update_res($self, $update_res, logger); + let update_completed = $self.handle_monitor_update_res($update_res, logger); if update_completed { handle_monitor_update_completion!( $self, @@ -3418,69 +3390,17 @@ macro_rules! handle_initial_monitor { }; } -fn handle_new_monitor_update_internal( - cm: &CM, - in_flight_monitor_updates: &mut BTreeMap)>, - channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, - new_update: ChannelMonitorUpdate, logger: LG, -) -> (bool, bool) { - let in_flight_updates = &mut in_flight_monitor_updates - .entry(channel_id) - .or_insert_with(|| (funding_txo, Vec::new())) - .1; - // During startup, we push monitor updates as background events through to here in - // order to replay updates that were in-flight when we shut down. Thus, we have to - // filter for uniqueness here. 
- let update_idx = - in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| { - in_flight_updates.push(new_update); - in_flight_updates.len() - 1 - }); - - if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) { - let update_res = - cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); - let update_completed = handle_monitor_update_res(cm, update_res, logger); - if update_completed { - let _ = in_flight_updates.remove(update_idx); - } - (update_completed, update_completed && in_flight_updates.is_empty()) - } else { - // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we - // fail to persist it. This is a fairly safe assumption, however, since anything we do - // during the startup sequence should be replayed exactly if we immediately crash. - let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo, - channel_id, - update: in_flight_updates[update_idx].clone(), - }; - // We want to track the in-flight update both in `in_flight_monitor_updates` and in - // `pending_background_events` to avoid a race condition during - // `pending_background_events` processing where we complete one - // `ChannelMonitorUpdate` (but there are more pending as background events) but we - // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to - // run post-completion actions. - // We could work around that with some effort, but its simpler to just track updates - // twice. - cm.get_cm().pending_background_events.lock().unwrap().push(event); - (false, false) - } -} - macro_rules! 
handle_post_close_monitor_update { ( $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr ) => {{ - let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( - $self, + let (update_completed, all_updates_complete) = $self.handle_new_monitor_update_internal( &mut $peer_state.in_flight_monitor_updates, $channel_id, $funding_txo, $counterparty_node_id, $update, - WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None), ); if all_updates_complete { let update_actions = $peer_state @@ -3510,14 +3430,12 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { ( $self: ident, $funding_txo: expr, $update: expr, $in_flight_monitor_updates: expr, $chan_context: expr ) => {{ - let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal( - $self, + let (update_completed, _all_updates_complete) = $self.handle_new_monitor_update_internal( $in_flight_monitor_updates, $chan_context.channel_id(), $funding_txo, $chan_context.get_counterparty_node_id(), $update, - WithChannelContext::from(&$self.logger, &$chan_context, None), ); update_completed }}; @@ -3528,14 +3446,12 @@ macro_rules! handle_new_monitor_update { $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr ) => {{ - let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( - $self, + let (update_completed, all_updates_complete) = $self.handle_new_monitor_update_internal( &mut $peer_state.in_flight_monitor_updates, $chan.context.channel_id(), $funding_txo, $chan.context.get_counterparty_node_id(), $update, - WithChannelContext::from(&$self.logger, &$chan.context, None), ); if all_updates_complete { handle_monitor_update_completion!( @@ -9283,6 +9199,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ payment_info, }], channel_id: Some(prev_hop.channel_id), + #[cfg(feature = "safe_channels")] + channel_state: None, }; // We don't have any idea if this is a duplicate claim without interrogating the @@ -9795,6 +9713,90 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + fn handle_new_monitor_update_internal( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, + new_update: ChannelMonitorUpdate, + ) -> (bool, bool) { + let in_flight_updates = &mut in_flight_monitor_updates + .entry(channel_id) + .or_insert_with(|| (funding_txo, Vec::new())) + .1; + // During startup, we push monitor updates as background events through to here in + // order to replay updates that were in-flight when we shut down. Thus, we have to + // filter for uniqueness here. + let update_idx = + in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| { + in_flight_updates.push(new_update); + in_flight_updates.len() - 1 + }); + + if self.background_events_processed_since_startup.load(Ordering::Acquire) { + let update_res = + self.chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); + let logger = + WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None); + let update_completed = self.handle_monitor_update_res(update_res, logger); + if update_completed { + let _ = in_flight_updates.remove(update_idx); + } + (update_completed, update_completed && in_flight_updates.is_empty()) + } else { + // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we + // fail to persist it. This is a fairly safe assumption, however, since anything we do + // during the startup sequence should be replayed exactly if we immediately crash. 
+ let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update: in_flight_updates[update_idx].clone(), + }; + // We want to track the in-flight update both in `in_flight_monitor_updates` and in + // `pending_background_events` to avoid a race condition during + // `pending_background_events` processing where we complete one + // `ChannelMonitorUpdate` (but there are more pending as background events) but we + // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to + // run post-completion actions. + // We could work around that with some effort, but its simpler to just track updates + // twice. + self.pending_background_events.lock().unwrap().push(event); + (false, false) + } + } + + /// Returns whether the monitor update is completed, `false` if the update is in-progress. + fn handle_monitor_update_res( + &self, update_res: ChannelMonitorUpdateStatus, logger: LG, + ) -> bool { + debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire)); + match update_res { + ChannelMonitorUpdateStatus::UnrecoverableError => { + let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; + log_error!(logger, "{}", err_str); + panic!("{}", err_str); + }, + ChannelMonitorUpdateStatus::InProgress => { + #[cfg(not(any(test, feature = "_externalize_tests")))] + if self.monitor_update_type.swap(1, Ordering::Relaxed) == 2 { + panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); + } + log_debug!( + logger, + "ChannelMonitor update in flight, holding messages until the update completes.", + ); + false + }, + ChannelMonitorUpdateStatus::Completed => { + #[cfg(not(any(test, feature = "_externalize_tests")))] + if self.monitor_update_type.swap(2, Ordering::Relaxed) == 1 { + panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); + } + true + }, + } + } + /// Handles a channel reentering a functional state, either due to reconnect or a monitor /// update completion. #[rustfmt::skip] @@ -10657,6 +10659,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fail_chan!("Already had channel with the new channel_id"); }, hash_map::Entry::Vacant(e) => { + #[cfg(feature = "safe_channels")] + { + monitor.update_channel_state(UpdateChannelState::Funded((&mut chan).into())); + } let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { // There's no problem signing a counterparty's funding transaction if our monitor @@ -10827,6 +10833,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match chan .funding_signed(&msg, best_block, &self.signer_provider, &self.logger) .and_then(|(funded_chan, monitor)| { + #[cfg(feature = "safe_channels")] + { + monitor.update_channel_state(UpdateChannelState::Funded(funded_chan.into())); + } self.chain_monitor .watch_channel(funded_chan.context.channel_id(), monitor) .map_err(|()| { @@ -11545,6 +11555,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(chan) = chan.as_funded_mut() { if let Some(monitor) = monitor_opt { + #[cfg(feature = "safe_channels")] + { + monitor.update_channel_state(UpdateChannelState::Funded(chan.into())); + } let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, @@ -14002,6 +14016,8 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + #[cfg(feature = "safe_channels")] + channel_state: None, }; let during_startup = @@ -17471,6 +17487,8 @@ where should_broadcast: true, }], channel_id: Some(monitor.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Closed), }; log_info!( logger, @@ -18135,6 +18153,8 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + #[cfg(feature = "safe_channels")] + channel_state: None, }, }); } diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index f80b2b6daea..d9ff9f67fdd 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -88,7 +88,7 @@ impl SpliceContribution { /// An input to contribute to a channel's funding transaction either when using the v2 channel /// establishment protocol or when splicing. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct FundingTxInput { /// The unspent [`TxOut`] that the input spends. 
/// diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 4340aad420a..4ead5fb3037 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -98,7 +98,7 @@ pub(crate) struct NegotiationError { pub contributed_outputs: Vec, } -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum AbortReason { InvalidStateTransition, UnexpectedCounterpartyMessage, @@ -550,7 +550,7 @@ impl ConstructedTransaction { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct SharedInputSignature { holder_signature_first: bool, witness_script: ScriptBuf, @@ -568,7 +568,7 @@ impl_writeable_tlv_based!(SharedInputSignature, { /// See the specification for more details: /// https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-commitment_signed-message /// https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#sharing-funding-signatures-tx_signatures -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct InteractiveTxSigningSession { unsigned_tx: ConstructedTransaction, holder_sends_tx_signatures_first: bool, @@ -919,7 +919,7 @@ impl_writeable_tlv_based!(InteractiveTxSigningSession, { (11, shared_input_signature, required), }); -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] struct NegotiationContext { holder_node_id: PublicKey, counterparty_node_id: PublicKey, @@ -1398,7 +1398,7 @@ macro_rules! 
define_state { }; ($state: ident, $inner: ident, $doc: expr) => { #[doc = $doc] - #[derive(Debug)] + #[derive(Debug, Clone, PartialEq, Eq)] struct $state($inner); impl State for $state {} }; @@ -1527,7 +1527,7 @@ define_state_transitions!(RECEIVED_MSG_STATE, [ define_state_transitions!(TX_COMPLETE, SentChangeMsg, ReceivedTxComplete); define_state_transitions!(TX_COMPLETE, ReceivedChangeMsg, SentTxComplete); -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] enum StateMachine { Indeterminate, SentChangeMsg(SentChangeMsg), @@ -1941,7 +1941,7 @@ impl InteractiveTxInput { } } -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(super) struct InteractiveTxConstructor { state_machine: StateMachine, is_initiator: bool, @@ -1954,7 +1954,7 @@ pub(super) struct InteractiveTxConstructor { } #[allow(clippy::enum_variant_names)] // Clippy doesn't like the repeated `Tx` prefix here -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) enum InteractiveTxMessageSend { TxAddInput(msgs::TxAddInput), TxAddOutput(msgs::TxAddOutput), diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index f237d73e533..df7501aab4d 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -2488,6 +2488,11 @@ mod fuzzy_internal_msgs { pub data: Vec<u8>, pub attribution_data: Option<AttributionData>, } + + impl_writeable_tlv_based!(OnionErrorPacket, { + (0, data, required), + (1, attribution_data, option), + }); } #[cfg(fuzzing)] pub use self::fuzzy_internal_msgs::*; diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index dbc2ebc9d48..d211a2036d2 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1897,12 +1897,10 @@ impl From<&HTLCFailReason> for HTLCHandlingFailureReason { } } -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug -#[cfg_attr(test, derive(PartialEq))] +#[derive(Clone, PartialEq, Eq)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug pub(super) struct
HTLCFailReason(HTLCFailReasonRepr); -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug -#[cfg_attr(test, derive(PartialEq))] +#[derive(Clone, PartialEq, Eq)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug enum HTLCFailReasonRepr { LightningError { err: msgs::OnionErrorPacket, hold_time: Option }, Reason { data: Vec, failure_reason: LocalHTLCFailureReason }, diff --git a/lightning/src/ln/script.rs b/lightning/src/ln/script.rs index 5258b8f3283..3daebc1a265 100644 --- a/lightning/src/ln/script.rs +++ b/lightning/src/ln/script.rs @@ -21,8 +21,7 @@ use crate::prelude::*; /// A script pubkey for shutting down a channel as defined by [BOLT #2]. /// /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md -#[cfg_attr(test, derive(Debug))] -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct ShutdownScript(ShutdownScriptImpl); /// An error occurring when converting from [`ScriptBuf`] to [`ShutdownScript`]. diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index dd1aaa40424..1ec212da5e3 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -772,7 +772,7 @@ impl From for ChannelConfigUpdate { /// Legacy version of [`ChannelConfig`] that stored the static /// [`ChannelHandshakeConfig::announce_for_forwarding`] and /// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`] fields. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub(crate) struct LegacyChannelConfig { pub(crate) options: ChannelConfig, /// Deprecated but may still be read from. See [`ChannelHandshakeConfig::announce_for_forwarding`] to