Skip to content

Commit f61e663

Browse files
Paolo Abeni authored and gregkh committed
mptcp: don't always assume copied data in mptcp_cleanup_rbuf()
commit 551844f upstream. Under some corner cases the MPTCP protocol can end up invoking mptcp_cleanup_rbuf() when no data has been copied, but such helper assumes the opposite condition. Explicitly drop such an assumption and perform the costly call only when strictly needed - before releasing the msk socket lock. Fixes: fd89767 ("mptcp: be careful on MPTCP-level ack.") Cc: stable@vger.kernel.org Signed-off-by: Paolo Abeni <pabeni@redhat.com> Reviewed-by: Mat Martineau <martineau@kernel.org> Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org> Link: https://patch.msgid.link/20241230-net-mptcp-rbuf-fixes-v1-2-8608af434ceb@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 27c843e commit f61e663

File tree

1 file changed

+9
-9
lines changed

1 file changed

+9
-9
lines changed

net/mptcp/protocol.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -528,13 +528,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
528528
mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
529529
}
530530

531-
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
531+
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
532532
{
533533
bool slow;
534534

535535
slow = lock_sock_fast(ssk);
536536
if (tcp_can_send_ack(ssk))
537-
tcp_cleanup_rbuf(ssk, 1);
537+
tcp_cleanup_rbuf(ssk, copied);
538538
unlock_sock_fast(ssk, slow);
539539
}
540540

@@ -551,22 +551,22 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
551551
(ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
552552
}
553553

554-
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
554+
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
555555
{
556556
int old_space = READ_ONCE(msk->old_wspace);
557557
struct mptcp_subflow_context *subflow;
558558
struct sock *sk = (struct sock *)msk;
559559
int space = __mptcp_space(sk);
560560
bool cleanup, rx_empty;
561561

562-
cleanup = (space > 0) && (space >= (old_space << 1));
563-
rx_empty = !__mptcp_rmem(sk);
562+
cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
563+
rx_empty = !__mptcp_rmem(sk) && copied;
564564

565565
mptcp_for_each_subflow(msk, subflow) {
566566
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
567567

568568
if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
569-
mptcp_subflow_cleanup_rbuf(ssk);
569+
mptcp_subflow_cleanup_rbuf(ssk, copied);
570570
}
571571
}
572572

@@ -2183,9 +2183,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
21832183

21842184
copied += bytes_read;
21852185

2186-
/* be sure to advertise window change */
2187-
mptcp_cleanup_rbuf(msk);
2188-
21892186
if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
21902187
continue;
21912188

@@ -2234,13 +2231,16 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
22342231
}
22352232

22362233
pr_debug("block timeout %ld\n", timeo);
2234+
mptcp_cleanup_rbuf(msk, copied);
22372235
err = sk_wait_data(sk, &timeo, NULL);
22382236
if (err < 0) {
22392237
err = copied ? : err;
22402238
goto out_err;
22412239
}
22422240
}
22432241

2242+
mptcp_cleanup_rbuf(msk, copied);
2243+
22442244
out_err:
22452245
if (cmsg_flags && copied >= 0) {
22462246
if (cmsg_flags & MPTCP_CMSG_TS)

0 commit comments

Comments (0)