Commit 4ba0a3c

Move PendingHTLCStatus construction inside channel lock
We need to construct a pending HTLC's status while holding the channel lock because, in upcoming commits, we will need to know whether the channel accepts underpaying HTLCs.
1 parent 773f785 · commit 4ba0a3c
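
At a glance, the commit splits HTLC handling into two steps: decode_update_add_htlc_onion now returns Result<(onion_utils::Hop, [u8; 32]), HTLCFailureMsg> and performs only onion decoding plus the outbound-channel checks, while a new construct_pending_htlc_status builds the PendingHTLCStatus and is called from the update_add_htlc handler (the last two hunks below) while the relevant peer's channel map is already locked. The stand-alone sketch below illustrates only that locking pattern; its types and names (Manager, DecodedHop, ChannelConfig, PendingStatus, accept_underpaying_htlcs) are hypothetical simplifications, not LDK's actual API.

use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical stand-ins for the real LDK types.
struct DecodedHop { amt_to_forward_msat: u64 }
struct ChannelConfig { accept_underpaying_htlcs: bool } // stand-in for the per-channel config the commit message alludes to
enum PendingStatus { Forward(u64), Fail(&'static str) }

struct Manager { channels: Mutex<HashMap<u64, ChannelConfig>> }

impl Manager {
    // Step 1: decode the onion without holding any per-channel lock.
    fn decode_onion(&self, htlc_amt_msat: u64) -> Result<DecodedHop, &'static str> {
        if htlc_amt_msat == 0 { return Err("invalid HTLC"); }
        Ok(DecodedHop { amt_to_forward_msat: htlc_amt_msat })
    }

    // Step 2: build the pending status while the channel state is locked, so
    // per-channel config (e.g. whether underpaying HTLCs are acceptable) is in scope.
    fn handle_update_add_htlc(&self, channel_id: u64, htlc_amt_msat: u64) -> PendingStatus {
        let decoded = self.decode_onion(htlc_amt_msat);
        let channels = self.channels.lock().unwrap();
        let chan = match channels.get(&channel_id) {
            Some(chan) => chan,
            None => return PendingStatus::Fail("unknown channel"),
        };
        match decoded {
            Ok(hop) => {
                // The channel's config is consulted under its lock when deciding the status.
                if htlc_amt_msat < hop.amt_to_forward_msat && !chan.accept_underpaying_htlcs {
                    PendingStatus::Fail("underpaying HTLC not accepted")
                } else {
                    PendingStatus::Forward(hop.amt_to_forward_msat)
                }
            },
            Err(e) => PendingStatus::Fail(e),
        }
    }
}

fn main() {
    let channels = HashMap::from([(42u64, ChannelConfig { accept_underpaying_htlcs: false })]);
    let manager = Manager { channels: Mutex::new(channels) };
    match manager.handle_update_add_htlc(42, 1_000) {
        PendingStatus::Forward(amt) => println!("forward {} msat onward", amt),
        PendingStatus::Fail(err) => println!("fail HTLC: {}", err),
    }
}

The point of the split is simply that step 2 runs with the channel entry locked, so per-channel configuration can influence the resulting status without taking the lock twice.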


lightning/src/ln/channelmanager.rs

Lines changed: 178 additions & 146 deletions
@@ -2626,12 +2626,14 @@ where
 		})
 	}

-	fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
+	fn decode_update_add_htlc_onion(
+		&self, msg: &msgs::UpdateAddHTLC
+	) -> Result<(onion_utils::Hop, [u8; 32]), HTLCFailureMsg> {
 		macro_rules! return_malformed_err {
 			($msg: expr, $err_code: expr) => {
 				{
 					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-					return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+					return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
 						sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
@@ -2662,7 +2664,7 @@ where
 			($msg: expr, $err_code: expr, $data: expr) => {
 				{
 					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+					return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
 						reason: HTLCFailReason::reason($err_code, $data.to_vec())
@@ -2681,8 +2683,174 @@ where
 				return_err!(err_msg, err_code, &[0; 0]);
 			},
 		};
+		let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value) = match next_hop {
+			onion_utils::Hop::Forward {
+				next_hop_data: msgs::OnionHopData {
+					format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward,
+					outgoing_cltv_value,
+				}, ..
+			} => (short_channel_id, amt_to_forward, outgoing_cltv_value),
+			// We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
+			// inbound channel's state.
+			onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret)),
+			_ => {
+				return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
+			}
+		};

-		let pending_forward_info = match next_hop {
+		// Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
+		// can't hold the outbound peer state lock at the same time as the inbound peer state lock.
+		if let Some((err, mut code, chan_update)) = loop {
+			let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
+			let forwarding_chan_info_opt = match id_option {
+				None => { // unknown_next_peer
+					// Note that this is likely a timing oracle for detecting whether an scid is a
+					// phantom or an intercept.
+					if (self.default_configuration.accept_intercept_htlcs &&
+						fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
+						fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
+					{
+						None
+					} else {
+						break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+					}
+				},
+				Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
+			};
+			let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
+				let per_peer_state = self.per_peer_state.read().unwrap();
+				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+				if peer_state_mutex_opt.is_none() {
+					break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+				}
+				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
+					None => {
+						// Channel was removed. The short_to_chan_info and channel_by_id maps
+						// have no consistency guarantees.
+						break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+					},
+					Some(chan) => chan
+				};
+				if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+					// Note that the behavior here should be identical to the above block - we
+					// should NOT reveal the existence or non-existence of a private channel if
+					// we don't allow forwards outbound over them.
+					break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+				}
+				if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
+					// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+					// "refuse to forward unless the SCID alias was used", so we pretend
+					// we don't have the channel here.
+					break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+				}
+				let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
+
+				// Note that we could technically not return an error yet here and just hope
+				// that the connection is reestablished or monitor updated by the time we get
+				// around to doing the actual forward, but better to fail early if we can and
+				// hopefully an attacker trying to path-trace payments cannot make this occur
+				// on a small/per-node/per-channel scale.
+				if !chan.context.is_live() { // channel_disabled
+					// If the channel_update we're going to return is disabled (i.e. the
+					// peer has been disabled for some time), return `channel_disabled`,
+					// otherwise return `temporary_channel_failure`.
+					if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+						break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+					} else {
+						break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+					}
+				}
+				if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+					break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+				}
+				if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
+					break Some((err, code, chan_update_opt));
+				}
+				chan_update_opt
+			} else {
+				if (msg.cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
+					// We really should set `incorrect_cltv_expiry` here but as we're not
+					// forwarding over a real channel we can't generate a channel_update
+					// for it. Instead we just return a generic temporary_node_failure.
+					break Some((
+						"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+						0x2000 | 2, None,
+					));
+				}
+				None
+			};
+
+			let cur_height = self.best_block.read().unwrap().height() + 1;
+			// Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+			// but we want to be robust wrt to counterparty packet sanitization (see
+			// HTLC_FAIL_BACK_BUFFER rationale).
+			if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
+				break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
+			}
+			if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+				break Some(("CLTV expiry is too far in the future", 21, None));
+			}
+			// If the HTLC expires ~now, don't bother trying to forward it to our
+			// counterparty. They should fail it anyway, but we don't want to bother with
+			// the round-trips or risk them deciding they definitely want the HTLC and
+			// force-closing to ensure they get it if we're offline.
+			// We previously had a much more aggressive check here which tried to ensure
+			// our counterparty receives an HTLC which has *our* risk threshold met on it,
+			// but there is no need to do that, and since we're a bit conservative with our
+			// risk threshold it just results in failing to forward payments.
+			if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+				break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
+			}
+
+			break None;
+		}
+		{
+			let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+			if let Some(chan_update) = chan_update {
+				if code == 0x1000 | 11 || code == 0x1000 | 12 {
+					msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+				}
+				else if code == 0x1000 | 13 {
+					msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+				}
+				else if code == 0x1000 | 20 {
+					// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+					0u16.write(&mut res).expect("Writes cannot fail");
+				}
+				(chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+				msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+				chan_update.write(&mut res).expect("Writes cannot fail");
+			} else if code & 0x1000 == 0x1000 {
+				// If we're trying to return an error that requires a `channel_update` but
+				// we're forwarding to a phantom or intercept "channel" (i.e. cannot
+				// generate an update), just use the generic "temporary_node_failure"
+				// instead.
+				code = 0x2000 | 2;
+			}
+			return_err!(err, code, &res.0[..]);
+		}
+		Ok((next_hop, shared_secret))
+	}
+
+	fn construct_pending_htlc_status<'a>(
+		&self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
+	) -> PendingHTLCStatus {
+		macro_rules! return_err {
+			($msg: expr, $err_code: expr, $data: expr) => {
+				{
+					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+						channel_id: msg.channel_id,
+						htlc_id: msg.htlc_id,
+						reason: HTLCFailReason::reason($err_code, $data.to_vec())
+							.get_encrypted_failure_packet(&shared_secret, &None),
+					}));
+				}
+			}
+		}
+		match decoded_hop {
 			onion_utils::Hop::Receive(next_hop_data) => {
 				// OUR PAYMENT!
 				match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None) {
@@ -2724,148 +2892,7 @@ where
 					outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
 				})
 			}
-		};
-
-		if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
-			// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
-			// with a short_channel_id of 0. This is important as various things later assume
-			// short_channel_id is non-0 in any ::Forward.
-			if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-				if let Some((err, mut code, chan_update)) = loop {
-					let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
-					let forwarding_chan_info_opt = match id_option {
-						None => { // unknown_next_peer
-							// Note that this is likely a timing oracle for detecting whether an scid is a
-							// phantom or an intercept.
-							if (self.default_configuration.accept_intercept_htlcs &&
-								fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) ||
-								fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
-							{
-								None
-							} else {
-								break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-							}
-						},
-						Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
-					};
-					let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
-						let per_peer_state = self.per_peer_state.read().unwrap();
-						let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
-						if peer_state_mutex_opt.is_none() {
-							break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-						}
-						let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-						let peer_state = &mut *peer_state_lock;
-						let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
-							None => {
-								// Channel was removed. The short_to_chan_info and channel_by_id maps
-								// have no consistency guarantees.
-								break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-							},
-							Some(chan) => chan
-						};
-						if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
-							// Note that the behavior here should be identical to the above block - we
-							// should NOT reveal the existence or non-existence of a private channel if
-							// we don't allow forwards outbound over them.
-							break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
-						}
-						if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() {
-							// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
-							// "refuse to forward unless the SCID alias was used", so we pretend
-							// we don't have the channel here.
-							break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
-						}
-						let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
-
-						// Note that we could technically not return an error yet here and just hope
-						// that the connection is reestablished or monitor updated by the time we get
-						// around to doing the actual forward, but better to fail early if we can and
-						// hopefully an attacker trying to path-trace payments cannot make this occur
-						// on a small/per-node/per-channel scale.
-						if !chan.context.is_live() { // channel_disabled
-							// If the channel_update we're going to return is disabled (i.e. the
-							// peer has been disabled for some time), return `channel_disabled`,
-							// otherwise return `temporary_channel_failure`.
-							if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
-								break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
-							} else {
-								break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
-							}
-						}
-						if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
-							break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
-						}
-						if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
-							break Some((err, code, chan_update_opt));
-						}
-						chan_update_opt
-					} else {
-						if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
-							// We really should set `incorrect_cltv_expiry` here but as we're not
-							// forwarding over a real channel we can't generate a channel_update
-							// for it. Instead we just return a generic temporary_node_failure.
-							break Some((
-								"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
-								0x2000 | 2, None,
-							));
-						}
-						None
-					};
-
-					let cur_height = self.best_block.read().unwrap().height() + 1;
-					// Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
-					// but we want to be robust wrt to counterparty packet sanitization (see
-					// HTLC_FAIL_BACK_BUFFER rationale).
-					if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-						break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
-					}
-					if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
-						break Some(("CLTV expiry is too far in the future", 21, None));
-					}
-					// If the HTLC expires ~now, don't bother trying to forward it to our
-					// counterparty. They should fail it anyway, but we don't want to bother with
-					// the round-trips or risk them deciding they definitely want the HTLC and
-					// force-closing to ensure they get it if we're offline.
-					// We previously had a much more aggressive check here which tried to ensure
-					// our counterparty receives an HTLC which has *our* risk threshold met on it,
-					// but there is no need to do that, and since we're a bit conservative with our
-					// risk threshold it just results in failing to forward payments.
-					if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
-						break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
-					}
-
-					break None;
-				}
-				{
-					let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
-					if let Some(chan_update) = chan_update {
-						if code == 0x1000 | 11 || code == 0x1000 | 12 {
-							msg.amount_msat.write(&mut res).expect("Writes cannot fail");
-						}
-						else if code == 0x1000 | 13 {
-							msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
-						}
-						else if code == 0x1000 | 20 {
-							// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
-							0u16.write(&mut res).expect("Writes cannot fail");
-						}
-						(chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
-						msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
-						chan_update.write(&mut res).expect("Writes cannot fail");
-					} else if code & 0x1000 == 0x1000 {
-						// If we're trying to return an error that requires a `channel_update` but
-						// we're forwarding to a phantom or intercept "channel" (i.e. cannot
-						// generate an update), just use the generic "temporary_node_failure"
-						// instead.
-						code = 0x2000 | 2;
-					}
-					return_err!(err, code, &res.0[..]);
-				}
-			}
 		}
-
-		pending_forward_info
 	}

 	/// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
@@ -5427,7 +5454,7 @@ where
 		//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
 		//but we should prevent it anyway.

-		let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+		let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
@@ -5439,6 +5466,11 @@ where
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan) => {

+				let pending_forward_info = match decoded_hop_res {
+					Ok((next_hop, shared_secret)) =>
+						self.construct_pending_htlc_status(msg, shared_secret, next_hop),
+					Err(e) => PendingHTLCStatus::Fail(e)
+				};
 				let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
 					// If the update_add is completely bogus, the call will Err and we will close,
 					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
