
Commit 4a18391

Move PendingHTLCStatus construction inside channel lock
We need the channel lock when constructing a pending HTLC's status because, in upcoming commits, we will need to know whether the channel accepts underpaying HTLCs.
1 parent ae9e96e commit 4a18391
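
To illustrate the locking pattern this commit moves toward, here is a minimal, self-contained Rust sketch (not LDK code): onion decoding runs before any channel lock is taken, while the per-HTLC status is only built once the channel map is locked, so channel-specific configuration such as whether underpaying HTLCs are accepted can be consulted. All names below (Manager, FakeChannel, Status, decode_onion, construct_status) are illustrative stand-ins, not rust-lightning APIs.

use std::collections::HashMap;
use std::sync::Mutex;

// Stand-in types; the real code uses onion_utils::Hop, PendingHTLCStatus, etc.
#[derive(Debug)]
enum Status {
    Forward(u64),
    Fail(&'static str),
}

struct FakeChannel {
    accepts_underpaying_htlcs: bool,
    htlc_minimum_msat: u64,
}

struct Manager {
    channels: Mutex<HashMap<u64, FakeChannel>>,
}

impl Manager {
    // Phase 1: onion/forwarding checks that need no channel state, done without the lock.
    fn decode_onion(&self, amt_msat: u64) -> Result<u64, &'static str> {
        if amt_msat == 0 {
            return Err("zero-amount HTLC");
        }
        Ok(amt_msat)
    }

    // Phase 2: checks that need channel state, done while the channel map is locked.
    fn construct_status(&self, chan: &FakeChannel, amt_msat: u64) -> Status {
        if amt_msat < chan.htlc_minimum_msat && !chan.accepts_underpaying_htlcs {
            Status::Fail("amount below htlc_minimum_msat")
        } else {
            Status::Forward(amt_msat)
        }
    }

    fn handle_update_add_htlc(&self, channel_id: u64, amt_msat: u64) -> Status {
        // Decode first, before taking the lock...
        let decoded = self.decode_onion(amt_msat);
        // ...then take the channel lock and build the status inside it.
        let channels = self.channels.lock().unwrap();
        match (decoded, channels.get(&channel_id)) {
            (Err(e), _) => Status::Fail(e),
            (Ok(amt), Some(chan)) => self.construct_status(chan, amt),
            (Ok(_), None) => Status::Fail("unknown channel"),
        }
    }
}

fn main() {
    let channels = HashMap::from([(1u64, FakeChannel {
        accepts_underpaying_htlcs: true,
        htlc_minimum_msat: 1_000,
    })]);
    let mgr = Manager { channels: Mutex::new(channels) };
    // Prints Forward(500): the channel is configured to accept underpaying HTLCs.
    println!("{:?}", mgr.handle_update_add_htlc(1, 500));
}

The real change follows the same shape: decode_update_add_htlc_onion now returns the decoded hop and shared secret, and construct_pending_htlc_status is called later, inside the peer state lock, when handling update_add_htlc.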

File tree

1 file changed (+178 −146 lines)


lightning/src/ln/channelmanager.rs

Lines changed: 178 additions & 146 deletions
@@ -2619,12 +2619,14 @@ where
 		})
 	}
 
-	fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
+	fn decode_update_add_htlc_onion(
+		&self, msg: &msgs::UpdateAddHTLC
+	) -> Result<(onion_utils::Hop, [u8; 32]), HTLCFailureMsg> {
 		macro_rules! return_malformed_err {
 			($msg: expr, $err_code: expr) => {
 				{
 					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-					return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+					return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
 						sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
@@ -2655,7 +2657,7 @@ where
 			($msg: expr, $err_code: expr, $data: expr) => {
 				{
 					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+					return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
 						reason: HTLCFailReason::reason($err_code, $data.to_vec())
@@ -2674,8 +2676,174 @@ where
 				return_err!(err_msg, err_code, &[0; 0]);
 			},
 		};
+		let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value) = match next_hop {
+			onion_utils::Hop::Forward {
+				next_hop_data: msgs::OnionHopData {
+					format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward,
+					outgoing_cltv_value,
+				}, ..
+			} => (short_channel_id, amt_to_forward, outgoing_cltv_value),
+			// We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
+			// inbound channel's state.
+			onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret)),
+			_ => {
+				return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
+			}
+		};
 
-		let pending_forward_info = match next_hop {
+		// Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
+		// can't hold the outbound peer state lock at the same time as the inbound peer state lock.
+		if let Some((err, mut code, chan_update)) = loop {
+			let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
+			let forwarding_chan_info_opt = match id_option {
+				None => { // unknown_next_peer
+					// Note that this is likely a timing oracle for detecting whether an scid is a
+					// phantom or an intercept.
+					if (self.default_configuration.accept_intercept_htlcs &&
+						fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
+						fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
+					{
+						None
+					} else {
+						break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+					}
+				},
+				Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
+			};
+			let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
+				let per_peer_state = self.per_peer_state.read().unwrap();
+				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+				if peer_state_mutex_opt.is_none() {
+					break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+				}
+				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
+					None => {
+						// Channel was removed. The short_to_chan_info and channel_by_id maps
+						// have no consistency guarantees.
+						break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+					},
+					Some(chan) => chan
+				};
+				if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+					// Note that the behavior here should be identical to the above block - we
+					// should NOT reveal the existence or non-existence of a private channel if
+					// we don't allow forwards outbound over them.
+					break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+				}
+				if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
+					// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+					// "refuse to forward unless the SCID alias was used", so we pretend
+					// we don't have the channel here.
+					break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+				}
+				let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
+
+				// Note that we could technically not return an error yet here and just hope
+				// that the connection is reestablished or monitor updated by the time we get
+				// around to doing the actual forward, but better to fail early if we can and
+				// hopefully an attacker trying to path-trace payments cannot make this occur
+				// on a small/per-node/per-channel scale.
+				if !chan.context.is_live() { // channel_disabled
+					// If the channel_update we're going to return is disabled (i.e. the
+					// peer has been disabled for some time), return `channel_disabled`,
+					// otherwise return `temporary_channel_failure`.
+					if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+						break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+					} else {
+						break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+					}
+				}
+				if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+					break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+				}
+				if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
+					break Some((err, code, chan_update_opt));
+				}
+				chan_update_opt
+			} else {
+				if (msg.cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
+					// We really should set `incorrect_cltv_expiry` here but as we're not
+					// forwarding over a real channel we can't generate a channel_update
+					// for it. Instead we just return a generic temporary_node_failure.
+					break Some((
+						"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+						0x2000 | 2, None,
+					));
+				}
+				None
+			};
+
+			let cur_height = self.best_block.read().unwrap().height() + 1;
+			// Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+			// but we want to be robust wrt to counterparty packet sanitization (see
+			// HTLC_FAIL_BACK_BUFFER rationale).
+			if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
+				break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
+			}
+			if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+				break Some(("CLTV expiry is too far in the future", 21, None));
+			}
+			// If the HTLC expires ~now, don't bother trying to forward it to our
+			// counterparty. They should fail it anyway, but we don't want to bother with
+			// the round-trips or risk them deciding they definitely want the HTLC and
+			// force-closing to ensure they get it if we're offline.
+			// We previously had a much more aggressive check here which tried to ensure
+			// our counterparty receives an HTLC which has *our* risk threshold met on it,
+			// but there is no need to do that, and since we're a bit conservative with our
+			// risk threshold it just results in failing to forward payments.
+			if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+				break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
+			}
+
+			break None;
+		}
+		{
+			let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+			if let Some(chan_update) = chan_update {
+				if code == 0x1000 | 11 || code == 0x1000 | 12 {
+					msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+				}
+				else if code == 0x1000 | 13 {
+					msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+				}
+				else if code == 0x1000 | 20 {
+					// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+					0u16.write(&mut res).expect("Writes cannot fail");
+				}
+				(chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+				msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+				chan_update.write(&mut res).expect("Writes cannot fail");
+			} else if code & 0x1000 == 0x1000 {
+				// If we're trying to return an error that requires a `channel_update` but
+				// we're forwarding to a phantom or intercept "channel" (i.e. cannot
+				// generate an update), just use the generic "temporary_node_failure"
+				// instead.
+				code = 0x2000 | 2;
+			}
+			return_err!(err, code, &res.0[..]);
+		}
+		Ok((next_hop, shared_secret))
+	}
+
+	fn construct_pending_htlc_status<'a>(
+		&self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
+	) -> PendingHTLCStatus {
+		macro_rules! return_err {
+			($msg: expr, $err_code: expr, $data: expr) => {
+				{
+					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+						channel_id: msg.channel_id,
+						htlc_id: msg.htlc_id,
+						reason: HTLCFailReason::reason($err_code, $data.to_vec())
+							.get_encrypted_failure_packet(&shared_secret, &None),
+					}));
+				}
+			}
+		}
+		match decoded_hop {
 			onion_utils::Hop::Receive(next_hop_data) => {
 				// OUR PAYMENT!
 				match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None) {
@@ -2717,148 +2885,7 @@ where
 					outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
 				})
 			}
-		};
-
-		if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
-			// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
-			// with a short_channel_id of 0. This is important as various things later assume
-			// short_channel_id is non-0 in any ::Forward.
-			if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-				if let Some((err, mut code, chan_update)) = loop {
-					let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
-					let forwarding_chan_info_opt = match id_option {
-						None => { // unknown_next_peer
-							// Note that this is likely a timing oracle for detecting whether an scid is a
-							// phantom or an intercept.
-							if (self.default_configuration.accept_intercept_htlcs &&
-								fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) ||
-								fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
-							{
-								None
-							} else {
-								break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-							}
-						},
-						Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
-					};
-					let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
-						let per_peer_state = self.per_peer_state.read().unwrap();
-						let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
-						if peer_state_mutex_opt.is_none() {
-							break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-						}
-						let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-						let peer_state = &mut *peer_state_lock;
-						let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
-							None => {
-								// Channel was removed. The short_to_chan_info and channel_by_id maps
-								// have no consistency guarantees.
-								break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-							},
-							Some(chan) => chan
-						};
-						if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
-							// Note that the behavior here should be identical to the above block - we
-							// should NOT reveal the existence or non-existence of a private channel if
-							// we don't allow forwards outbound over them.
-							break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
-						}
-						if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() {
-							// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
-							// "refuse to forward unless the SCID alias was used", so we pretend
-							// we don't have the channel here.
-							break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
-						}
-						let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
-
-						// Note that we could technically not return an error yet here and just hope
-						// that the connection is reestablished or monitor updated by the time we get
-						// around to doing the actual forward, but better to fail early if we can and
-						// hopefully an attacker trying to path-trace payments cannot make this occur
-						// on a small/per-node/per-channel scale.
-						if !chan.context.is_live() { // channel_disabled
-							// If the channel_update we're going to return is disabled (i.e. the
-							// peer has been disabled for some time), return `channel_disabled`,
-							// otherwise return `temporary_channel_failure`.
-							if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
-								break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
-							} else {
-								break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
-							}
-						}
-						if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
-							break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
-						}
-						if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
-							break Some((err, code, chan_update_opt));
-						}
-						chan_update_opt
-					} else {
-						if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
-							// We really should set `incorrect_cltv_expiry` here but as we're not
-							// forwarding over a real channel we can't generate a channel_update
-							// for it. Instead we just return a generic temporary_node_failure.
-							break Some((
-								"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
-								0x2000 | 2, None,
-							));
-						}
-						None
-					};
-
-					let cur_height = self.best_block.read().unwrap().height() + 1;
-					// Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
-					// but we want to be robust wrt to counterparty packet sanitization (see
-					// HTLC_FAIL_BACK_BUFFER rationale).
-					if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-						break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
-					}
-					if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
-						break Some(("CLTV expiry is too far in the future", 21, None));
-					}
-					// If the HTLC expires ~now, don't bother trying to forward it to our
-					// counterparty. They should fail it anyway, but we don't want to bother with
-					// the round-trips or risk them deciding they definitely want the HTLC and
-					// force-closing to ensure they get it if we're offline.
-					// We previously had a much more aggressive check here which tried to ensure
-					// our counterparty receives an HTLC which has *our* risk threshold met on it,
-					// but there is no need to do that, and since we're a bit conservative with our
-					// risk threshold it just results in failing to forward payments.
-					if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
-						break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
-					}
-
-					break None;
-				}
-				{
-					let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
-					if let Some(chan_update) = chan_update {
-						if code == 0x1000 | 11 || code == 0x1000 | 12 {
-							msg.amount_msat.write(&mut res).expect("Writes cannot fail");
-						}
-						else if code == 0x1000 | 13 {
-							msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
-						}
-						else if code == 0x1000 | 20 {
-							// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
-							0u16.write(&mut res).expect("Writes cannot fail");
-						}
-						(chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
-						msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
-						chan_update.write(&mut res).expect("Writes cannot fail");
-					} else if code & 0x1000 == 0x1000 {
-						// If we're trying to return an error that requires a `channel_update` but
-						// we're forwarding to a phantom or intercept "channel" (i.e. cannot
-						// generate an update), just use the generic "temporary_node_failure"
-						// instead.
-						code = 0x2000 | 2;
-					}
-					return_err!(err, code, &res.0[..]);
-				}
-			}
 		}
-
-		pending_forward_info
 	}
 
 	/// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
@@ -5358,7 +5385,7 @@ where
 		//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
 		//but we should prevent it anyway.
 
-		let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+		let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
@@ -5370,6 +5397,11 @@ where
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan) => {
 
+				let pending_forward_info = match decoded_hop_res {
+					Ok((next_hop, shared_secret)) =>
+						self.construct_pending_htlc_status(msg, shared_secret, next_hop),
+					Err(e) => PendingHTLCStatus::Fail(e)
+				};
 				let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
 					// If the update_add is completely bogus, the call will Err and we will close,
 					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
