diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 128091ccdd5..1cc0c8552db 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -14565,7 +14565,7 @@ where
 			}
 		}
 		let mut removed_htlc_attribution_data: Vec<&Option<AttributionData>> = Vec::new();
-		let mut inbound_committed_update_adds: Vec<Option<msgs::UpdateAddHTLC>> = Vec::new();
+		let mut inbound_committed_update_adds: Vec<&Option<msgs::UpdateAddHTLC>> = Vec::new();
 		(self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
 		for htlc in self.context.pending_inbound_htlcs.iter() {
 			if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
@@ -14587,7 +14587,7 @@ where
 				},
 				&InboundHTLCState::Committed { ref update_add_htlc_opt } => {
 					3u8.write(writer)?;
-					inbound_committed_update_adds.push(update_add_htlc_opt.clone());
+					inbound_committed_update_adds.push(update_add_htlc_opt);
 				},
 				&InboundHTLCState::LocalRemoved(ref removal_reason) => {
 					4u8.write(writer)?;
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f2e8fa70e4f..65b94808297 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -11758,6 +11758,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 
 		if !new_intercept_events.is_empty() {
 			let mut events = self.pending_events.lock().unwrap();
+			// It's possible we processed this intercept forward, generated an event, then re-processed
+			// it here after restart, in which case the intercept event should not be pushed
+			// redundantly.
+			new_intercept_events.retain(|ev| !events.contains(ev));
 			events.append(&mut new_intercept_events);
 		}
 	}
@@ -16681,7 +16685,18 @@ where
 			}
 		}
 
-		{
+		// In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and
+		// persist that state, relying on it being up-to-date on restart. Newer versions are moving
+		// towards reducing this reliance on regular persistence of the `ChannelManager`, and instead
+		// reconstruct HTLC/payment state based on `Channel{Monitor}` data if
+		// `reconstruct_manager_from_monitors` is set on read. In tests, we always want to exercise the
+		// new codepaths, so we skip writing the legacy maps in order to force reconstruction on restart.
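(Editorial aside: the write-side gating described in the comment above can be illustrated in isolation. A minimal sketch, assuming a hypothetical `reconstruct` flag and a simplified legacy map of `(scid, htlc_id)` pairs; this is not LDK's actual serialization format:)

```rust
use std::io::{self, Write};

// When `reconstruct` is set we still emit a zero-length legacy map so that older
// readers can parse the stream, while new readers rebuild the state from
// `Channel{Monitor}` data instead of trusting this possibly-stale copy.
fn write_legacy_map<W: Write>(
	w: &mut W, legacy: &[(u64, u64)], reconstruct: bool,
) -> io::Result<()> {
	if reconstruct {
		w.write_all(&0u64.to_be_bytes())?;
	} else {
		w.write_all(&(legacy.len() as u64).to_be_bytes())?;
		for (scid, htlc_id) in legacy {
			w.write_all(&scid.to_be_bytes())?;
			w.write_all(&htlc_id.to_be_bytes())?;
		}
	}
	Ok(())
}
```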
+		#[cfg(not(test))]
+		let reconstruct_manager_from_monitors = false;
+		#[cfg(test)]
+		let reconstruct_manager_from_monitors = true;
+
+		if !reconstruct_manager_from_monitors {
 			let forward_htlcs = self.forward_htlcs.lock().unwrap();
 			(forward_htlcs.len() as u64).write(writer)?;
 			for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
@@ -16691,12 +16706,16 @@ where
 					forward.write(writer)?;
 				}
 			}
+		} else {
+			0u64.write(writer)?;
 		}
 
 		let mut decode_update_add_htlcs_opt = None;
-		let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
-		if !decode_update_add_htlcs.is_empty() {
-			decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
+		if !reconstruct_manager_from_monitors {
+			let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+			if !decode_update_add_htlcs.is_empty() {
+				decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
+			}
 		}
 
 		let claimable_payments = self.claimable_payments.lock().unwrap();
@@ -16842,9 +16861,11 @@ where
 		}
 
 		let mut pending_intercepted_htlcs = None;
-		let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
-		if our_pending_intercepts.len() != 0 {
-			pending_intercepted_htlcs = Some(our_pending_intercepts);
+		if !reconstruct_manager_from_monitors {
+			let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
+			if our_pending_intercepts.len() != 0 {
+				pending_intercepted_htlcs = Some(our_pending_intercepts);
+			}
 		}
 
 		let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
@@ -16885,6 +16906,7 @@ where
 			(17, in_flight_monitor_updates, option),
 			(19, peer_storage_dir, optional_vec),
 			(21, WithoutLength(&self.flow.writeable_async_receive_offer_cache()), required),
+			(23, reconstruct_manager_from_monitors, required),
 		});
 
 		// Remove the SpliceFailed events added earlier.
@@ -17116,28 +17138,32 @@ fn dedup_decode_update_add_htlcs(
 ) where
 	L::Target: Logger,
 {
-	decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| {
-		update_add_htlcs.retain(|update_add| {
-			let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias
-				&& update_add.htlc_id == prev_hop_data.htlc_id;
-			if matches {
-				let logger = WithContext::from(
-					logger,
-					prev_hop_data.counterparty_node_id,
-					Some(update_add.channel_id),
-					Some(update_add.payment_hash),
-				);
-				log_info!(
-					logger,
-					"Removing pending to-decode HTLC with id {}: {}",
-					update_add.htlc_id,
-					removal_reason
-				);
+	match decode_update_add_htlcs.entry(prev_hop_data.prev_outbound_scid_alias) {
+		hash_map::Entry::Occupied(mut update_add_htlcs) => {
+			update_add_htlcs.get_mut().retain(|update_add| {
+				let matches = update_add.htlc_id == prev_hop_data.htlc_id;
+				if matches {
+					let logger = WithContext::from(
+						logger,
+						prev_hop_data.counterparty_node_id,
+						Some(update_add.channel_id),
+						Some(update_add.payment_hash),
+					);
+					log_info!(
+						logger,
+						"Removing pending to-decode HTLC with id {}: {}",
+						update_add.htlc_id,
+						removal_reason
+					);
+				}
+				!matches
+			});
+			if update_add_htlcs.get().is_empty() {
+				update_add_htlcs.remove();
 			}
-			!matches
-		});
-		!update_add_htlcs.is_empty()
-	});
+		},
+		_ => {},
+	}
 }
 
 // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
@@ -17593,9 +17619,10 @@ where
 			};
 		}
 
-		// Some maps are read but may no longer be used because we attempt to rebuild the pending HTLC
-		// set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of
-		// regularly persisting the `ChannelManager`.
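(Editorial aside: the rewritten `dedup_decode_update_add_htlcs` above replaces a whole-map `retain` with a targeted `Entry` lookup, so only the bucket for the relevant SCID alias is scanned. A standalone sketch of the same retain-then-prune pattern on a plain `HashMap` with toy key/value types, not the LDK helper itself:)

```rust
use std::collections::{hash_map, HashMap};

// Drop a single HTLC id from the bucket keyed by `scid`, removing the bucket
// entirely once it becomes empty.
fn remove_htlc(map: &mut HashMap<u64, Vec<u64>>, scid: u64, htlc_id: u64) {
	if let hash_map::Entry::Occupied(mut bucket) = map.entry(scid) {
		bucket.get_mut().retain(|id| *id != htlc_id);
		if bucket.get().is_empty() {
			bucket.remove();
		}
	}
}

fn main() {
	let mut map = HashMap::new();
	map.insert(42u64, vec![1u64, 2]);
	remove_htlc(&mut map, 42, 1);
	assert_eq!(map.get(&42), Some(&vec![2u64]));
	remove_htlc(&mut map, 42, 2);
	assert!(!map.contains_key(&42)); // empty bucket is pruned
}
```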
+		// In LDK versions >0.2, we are taking steps to remove the requirement of regularly persisting
+		// the `ChannelManager`. To that end, if `reconstruct_manager_from_monitors` is set below, we
+		// will rebuild the pending HTLC set using data from the `Channel{Monitor}`s instead and ignore
+		// these legacy maps.
 		let mut pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = None;
 		let mut decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> =
@@ -17625,6 +17652,7 @@ where
 		let mut inbound_payment_id_secret = None;
 		let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
 		let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new();
+		let mut reconstruct_manager_from_monitors = false;
 		read_tlv_fields!(reader, {
 			(1, pending_outbound_payments_no_retry, option),
 			(2, pending_intercepted_htlcs_legacy, option),
@@ -17643,6 +17671,7 @@ where
 			(17, in_flight_monitor_updates, option),
 			(19, peer_storage_dir, optional_vec),
 			(21, async_receive_offer_cache, (default_value, async_receive_offer_cache)),
+			(23, reconstruct_manager_from_monitors, (default_value, false)),
 		});
 		let mut decode_update_add_htlcs_legacy =
 			decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map());
@@ -17960,18 +17989,20 @@ where
 			let mut peer_state_lock = peer_state_mtx.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
-			if let Some(chan) = peer_state.channel_by_id.get(channel_id) {
-				if let Some(funded_chan) = chan.as_funded() {
-					let inbound_committed_update_adds =
-						funded_chan.get_inbound_committed_update_adds();
-					if !inbound_committed_update_adds.is_empty() {
-						// Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized
-						// `Channel`, as part of removing the requirement to regularly persist the
-						// `ChannelManager`.
-						decode_update_add_htlcs.insert(
-							funded_chan.context.outbound_scid_alias(),
-							inbound_committed_update_adds,
-						);
+			if reconstruct_manager_from_monitors {
+				if let Some(chan) = peer_state.channel_by_id.get(channel_id) {
+					if let Some(funded_chan) = chan.as_funded() {
+						let inbound_committed_update_adds =
+							funded_chan.get_inbound_committed_update_adds();
+						if !inbound_committed_update_adds.is_empty() {
+							// Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized
+							// `Channel`, as part of removing the requirement to regularly persist the
+							// `ChannelManager`.
+							decode_update_add_htlcs.insert(
+								funded_chan.context.outbound_scid_alias(),
+								inbound_committed_update_adds,
+							);
+						}
 					}
 				}
 			}
@@ -18015,33 +18046,35 @@ where
 				is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
 			}
 
-			if is_channel_closed {
-				for (htlc_source, (htlc, preimage_opt)) in
-					monitor.get_all_current_outbound_htlcs()
-				{
-					let logger = WithChannelMonitor::from(
-						&args.logger,
-						monitor,
-						Some(htlc.payment_hash),
-					);
-					let htlc_id = SentHTLCId::from_source(&htlc_source);
-					match htlc_source {
-						HTLCSource::PreviousHopData(prev_hop_data) => {
-							let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
-								info.prev_funding_outpoint == prev_hop_data.outpoint
-									&& info.prev_htlc_id == prev_hop_data.htlc_id
-							};
-							// The ChannelMonitor is now responsible for this HTLC's
-							// failure/success and will let us know what its outcome is. If we
-							// still have an entry for this HTLC in `forward_htlcs`,
-							// `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not
-							// persisted after the monitor was when forwarding the payment.
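(Editorial aside: the reconstruction step gated on `reconstruct_manager_from_monitors` above re-keys each funded channel's committed inbound `update_add`s by that channel's outbound SCID alias. A toy sketch of the shape of that rebuild, using hypothetical `scid_alias`/`committed_adds` fields rather than LDK's real channel types:)

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct UpdateAdd {
	htlc_id: u64,
}

struct Chan {
	scid_alias: u64,
	committed_adds: Vec<UpdateAdd>,
}

// Rebuild the "to-decode" HTLC map from per-channel data instead of trusting a
// possibly-stale map that was serialized with the manager.
fn rebuild_decode_map(chans: &[Chan]) -> HashMap<u64, Vec<UpdateAdd>> {
	let mut decode_update_add_htlcs = HashMap::new();
	for chan in chans {
		if !chan.committed_adds.is_empty() {
			decode_update_add_htlcs.insert(chan.scid_alias, chan.committed_adds.clone());
		}
	}
	decode_update_add_htlcs
}
```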
+ for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() + { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); + let htlc_id = SentHTLCId::from_source(&htlc_source); + match htlc_source { + HTLCSource::PreviousHopData(prev_hop_data) => { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id + }; + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed + // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from + // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, + // we'll double-forward. + if reconstruct_manager_from_monitors { dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, &prev_hop_data, "HTLC was forwarded to the closed channel", &args.logger, ); + } + if is_channel_closed { + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs`, + // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not + // persisted after the monitor was when forwarding the payment. dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs_legacy, &prev_hop_data, @@ -18072,99 +18105,111 @@ where false } else { true } }); - }, - HTLCSource::OutboundRoute { - payment_id, - session_priv, - path, - bolt12_invoice, - .. - } => { - if let Some(preimage) = preimage_opt { - let pending_events = Mutex::new(pending_events_read); - let update = PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id, - }; - let mut compl_action = Some( + } + }, + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. + } => { + if !is_channel_closed { + continue; + } + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + let update = PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id, + }; + let mut compl_action = Some( EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) ); - pending_outbounds.claim_htlc( - payment_id, - preimage, - bolt12_invoice, - session_priv, - path, - true, - &mut compl_action, - &pending_events, - ); - // If the completion action was not consumed, then there was no - // payment to claim, and we need to tell the `ChannelMonitor` - // we don't need to hear about the HTLC again, at least as long - // as the PaymentSent event isn't still sitting around in our - // event queue. - let have_action = if compl_action.is_some() { - let pending_events = pending_events.lock().unwrap(); - pending_events.iter().any(|(_, act)| *act == compl_action) - } else { - false - }; - if !have_action && compl_action.is_some() { - let mut peer_state = per_peer_state - .get(&counterparty_node_id) - .map(|state| state.lock().unwrap()) - .expect("Channels originating a preimage must have peer state"); - let update_id = peer_state - .closed_channel_monitor_update_ids - .get_mut(channel_id) - .expect("Channels originating a preimage must have a monitor"); - // Note that for channels closed pre-0.1, the latest - // update_id is `u64::MAX`. 
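(Editorial aside: the `saturating_add(1)` in the lines that follow matters because monitors for channels closed before LDK 0.1 carry `u64::MAX` as their latest update_id; a plain `+ 1` would wrap, and panic in debug builds. A tiny illustration:)

```rust
// Bump a closed-channel monitor update_id without wrapping past the legacy
// `u64::MAX` sentinel used for channels closed before LDK 0.1.
fn next_update_id(current: u64) -> u64 {
	current.saturating_add(1)
}

fn main() {
	assert_eq!(next_update_id(5), 6);
	assert_eq!(next_update_id(u64::MAX), u64::MAX); // the sentinel stays put
}
```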
- *update_id = update_id.saturating_add(1); - - pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: monitor.get_counterparty_node_id(), + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + true, + &mut compl_action, + &pending_events, + ); + // If the completion action was not consumed, then there was no + // payment to claim, and we need to tell the `ChannelMonitor` + // we don't need to hear about the HTLC again, at least as long + // as the PaymentSent event isn't still sitting around in our + // event queue. + let have_action = if compl_action.is_some() { + let pending_events = pending_events.lock().unwrap(); + pending_events.iter().any(|(_, act)| *act == compl_action) + } else { + false + }; + if !have_action && compl_action.is_some() { + let mut peer_state = per_peer_state + .get(&counterparty_node_id) + .map(|state| state.lock().unwrap()) + .expect( + "Channels originating a preimage must have peer state", + ); + let update_id = peer_state + .closed_channel_monitor_update_ids + .get_mut(channel_id) + .expect( + "Channels originating a preimage must have a monitor", + ); + // Note that for channels closed pre-0.1, the latest + // update_id is `u64::MAX`. + *update_id = update_id.saturating_add(1); + + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor + .get_counterparty_node_id(), funding_txo: monitor.get_funding_txo(), channel_id: monitor.channel_id(), update: ChannelMonitorUpdate { update_id: *update_id, channel_id: Some(monitor.channel_id()), - updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { + updates: vec![ + ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, - }], + }, + ], }, - }); - } - pending_events_read = pending_events.into_inner().unwrap(); + }, + ); } - }, - } + pending_events_read = pending_events.into_inner().unwrap(); + } + }, } - for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { - log_info!( - args.logger, - "Failing HTLC with payment hash {} as it was resolved on-chain.", - payment_hash - ); - let completion_action = Some(PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id: SentHTLCId::from_source(&htlc_source), - }); + } + for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + log_info!( + args.logger, + "Failing HTLC with payment hash {} as it was resolved on-chain.", + payment_hash + ); + let completion_action = Some(PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id: SentHTLCId::from_source(&htlc_source), + }); - failed_htlcs.push(( - htlc_source, - payment_hash, - monitor.get_counterparty_node_id(), - monitor.channel_id(), - LocalHTLCFailureReason::OnChainTimeout, - completion_action, - )); - } + failed_htlcs.push(( + htlc_source, + payment_hash, + monitor.get_counterparty_node_id(), + monitor.channel_id(), + LocalHTLCFailureReason::OnChainTimeout, + completion_action, + )); } // Whether the downstream channel was closed or not, try to re-apply any payment @@ -18532,101 +18577,49 @@ where } } - // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. 
- // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. - for (src, _, _, _, _, _) in failed_htlcs.iter() { - if let HTLCSource::PreviousHopData(prev_hop_data) = src { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was failed backwards during manager read", - &args.logger, - ); - } - } - - // See above comment on `failed_htlcs`. - for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } - } - - // Remove HTLCs from `forward_htlcs` if they are also present in `decode_update_add_htlcs`. - // - // In the future, the full set of pending HTLCs will be pulled from `Channel{Monitor}` data and - // placed in `ChannelManager::decode_update_add_htlcs` on read, to be handled on the next call - // to `process_pending_htlc_forwards`. This is part of a larger effort to remove the requirement - // of regularly persisting the `ChannelManager`. The new pipeline is supported for HTLC forwards - // received on LDK 0.3+ but not <= 0.2, so prune non-legacy HTLCs from `forward_htlcs`. - forward_htlcs_legacy.retain(|scid, pending_fwds| { - for fwd in pending_fwds { - let (prev_scid, prev_htlc_id) = match fwd { - HTLCForwardInfo::AddHTLC(htlc) => { - (htlc.prev_outbound_scid_alias, htlc.prev_htlc_id) - }, - HTLCForwardInfo::FailHTLC { htlc_id, .. } - | HTLCForwardInfo::FailMalformedHTLC { htlc_id, .. } => (*scid, *htlc_id), - }; - if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { - if pending_update_adds - .iter() - .any(|update_add| update_add.htlc_id == prev_htlc_id) - { - return false; - } + if reconstruct_manager_from_monitors { + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, _, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); } } - true - }); - // Remove intercepted HTLC forwards if they are also present in `decode_update_add_htlcs`. See - // the above comment. - pending_intercepted_htlcs_legacy.retain(|id, fwd| { - let prev_scid = fwd.prev_outbound_scid_alias; - if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { - if pending_update_adds - .iter() - .any(|update_add| update_add.htlc_id == fwd.prev_htlc_id) - { - pending_events_read.retain( - |(ev, _)| !matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), + + // See above comment on `failed_htlcs`. + for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, ); - return false; } } + } + + // If we have a pending intercept HTLC present but no corresponding event, add that now rather + // than relying on the user having persisted the event prior to shutdown. 
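(Editorial aside: the "regenerate the event only if it is missing" rule in the comment above boils down to scanning the pending event queue for a matching intercept id before pushing a fresh event. A compact sketch with a toy event type standing in for `Event::HTLCIntercepted`; not LDK's actual types:)

```rust
enum Ev {
	Intercepted { id: u64 },
}

// Push a regenerated intercept event only if no event with that id is already
// queued, so a user who persisted the event before shutdown never sees it twice.
fn ensure_intercept_event(events: &mut Vec<Ev>, id: u64) {
	let already_queued =
		events.iter().any(|ev| matches!(ev, Ev::Intercepted { id: queued } if *queued == id));
	if !already_queued {
		events.push(Ev::Intercepted { id });
	}
}

fn main() {
	let mut events = vec![Ev::Intercepted { id: 7 }];
	ensure_intercept_event(&mut events, 7);
	ensure_intercept_event(&mut events, 9);
	assert_eq!(events.len(), 2);
}
```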
+		for (id, intercept) in pending_intercepted_htlcs_legacy.iter() {
 			if !pending_events_read.iter().any(
 				|(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id),
 			) {
-				match create_htlc_intercepted_event(*id, &fwd) {
+				match create_htlc_intercepted_event(*id, intercept) {
 					Ok(ev) => pending_events_read.push_back((ev, None)),
 					Err(()) => debug_assert!(false),
 				}
 			}
-			true
-		});
-		// Add legacy update_adds that were received on LDK <= 0.2 that are not present in the
-		// `decode_update_add_htlcs` map that was rebuilt from `Channel{Monitor}` data, see above
-		// comment.
-		for (scid, legacy_update_adds) in decode_update_add_htlcs_legacy.drain() {
-			match decode_update_add_htlcs.entry(scid) {
-				hash_map::Entry::Occupied(mut update_adds) => {
-					for legacy_update_add in legacy_update_adds {
-						if !update_adds.get().contains(&legacy_update_add) {
-							update_adds.get_mut().push(legacy_update_add);
-						}
-					}
-				},
-				hash_map::Entry::Vacant(entry) => {
-					entry.insert(legacy_update_adds);
-				},
-			}
 		}
+		if !reconstruct_manager_from_monitors {
+			decode_update_add_htlcs = decode_update_add_htlcs_legacy;
+		}
 
 		let best_block = BestBlock::new(best_block_hash, best_block_height);
 		let flow = OffersMessageFlow::new(
 			chain_hash,
@@ -18991,12 +18984,11 @@ where
 mod tests {
 	use crate::events::{ClosureReason, Event, HTLCHandlingFailureType};
 	use crate::ln::channelmanager::{
-		create_recv_pending_htlc_info, inbound_payment, HTLCForwardInfo, InterceptId, PaymentId,
+		create_recv_pending_htlc_info, inbound_payment, InterceptId, PaymentId,
 		RecipientOnionFields,
 	};
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent};
-	use crate::ln::onion_utils::AttributionData;
 	use crate::ln::onion_utils::{self, LocalHTLCFailureReason};
 	use crate::ln::outbound_payment::Retry;
 	use crate::ln::types::ChannelId;
@@ -19006,7 +18998,6 @@ mod tests {
 	use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
 	use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
 	use crate::util::errors::APIError;
-	use crate::util::ser::Writeable;
 	use crate::util::test_utils;
 	use bitcoin::secp256k1::ecdh::SharedSecret;
 	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
@@ -20064,66 +20055,6 @@ mod tests {
 			check_spends!(txn[0], funding_tx);
 		}
 	}
-
-	#[test]
-	#[rustfmt::skip]
-	fn test_malformed_forward_htlcs_ser() {
-		// Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
-		let chanmon_cfg = create_chanmon_cfgs(1);
-		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
-		let persister;
-		let chain_monitor;
-		let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
-		let deserialized_chanmgr;
-		let mut nodes = create_network(1, &node_cfg, &chanmgrs);
-
-		let dummy_failed_htlc = |htlc_id| {
-			HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } }
-		};
-		let dummy_malformed_htlc = |htlc_id| {
-			HTLCForwardInfo::FailMalformedHTLC {
-				htlc_id,
-				failure_code: LocalHTLCFailureReason::InvalidOnionPayload.failure_code(),
-				sha256_of_onion: [0; 32],
-			}
-		};
-
-		let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
-			if htlc_id % 2 == 0 {
-				dummy_failed_htlc(htlc_id)
-			} else {
-				dummy_malformed_htlc(htlc_id)
-			}
-		}).collect();
-
-		let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
-			if htlc_id % 2 == 1 {
-				dummy_failed_htlc(htlc_id)
-			} else {
-				dummy_malformed_htlc(htlc_id)
-			}
-		}).collect();
-
-
-		let (scid_1, scid_2) = (42, 43);
-		let mut forward_htlcs = new_hash_map();
-		forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
-		forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
-
-		let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
-		*chanmgr_fwd_htlcs = forward_htlcs.clone();
-		core::mem::drop(chanmgr_fwd_htlcs);
-
-		reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
-
-		let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
-		for scid in [scid_1, scid_2].iter() {
-			let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
-			assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
-		}
-		assert!(deserialized_fwd_htlcs.is_empty());
-		core::mem::drop(deserialized_fwd_htlcs);
-	}
 }
 
 #[cfg(ldk_bench)]
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index d143082821d..ffd1e8aa477 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -1258,6 +1258,62 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) {
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
 }
 
+#[test]
+fn test_manager_persisted_post_outbound_edge_forward() {
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let persister;
+	let new_chain_monitor;
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes_1_deserialized;
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+	// Lock in the HTLC from node_a <> node_b.
+	let amt_msat = 5000;
+	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
+	nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	check_added_monitors(&nodes[0], 1);
+	let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+	do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false);
+
+	// Add the HTLC to the outbound edge, node_b <> node_c.
+	nodes[1].node.process_pending_htlc_forwards();
+	check_added_monitors(&nodes[1], 1);
+
+	// Disconnect peers and reload the forwarding node_b.
+	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
+
+	let node_b_encoded = nodes[1].node.encode();
+	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+	reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+	reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0]));
+	let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]);
+	args_b_c.send_channel_ready = (true, true);
+	args_b_c.send_announcement_sigs = (true, true);
+	args_b_c.pending_htlc_adds = (0, 1);
+	// While reconnecting, we re-send node_b's outbound update_add and commit the HTLC to the b<>c
+	// channel.
+	reconnect_nodes(args_b_c);
+
+	// Ensure node_b won't double-forward the outbound HTLC (this was previously broken).
+	nodes[1].node.process_pending_htlc_forwards();
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Claim the HTLC backwards to node_a.
+	expect_and_process_pending_htlcs(&nodes[2], false);
+	expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
+	let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]];
+	do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage));
+	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
 #[test]
 fn test_reload_partial_funding_batch() {
 	let chanmon_cfgs = create_chanmon_cfgs(3);