
Commit f278b6d

edumazet authored and kuba-moo committed
Revert "tcp: avoid atomic operations on sk->sk_rmem_alloc"
This reverts commit 0de2a5c.

I forgot that a TCP socket could receive messages in its error queue.

sock_queue_err_skb() can be called without the socket lock being held,
and changes sk->sk_rmem_alloc.

The fact that skbs in the error queue are limited by sk->sk_rcvbuf
means that error messages can be dropped if socket receive queues
are full, which is an orthogonal issue.

In future kernels, we could use a separate sk->sk_error_mem_alloc
counter specifically for the error queue.

Fixes: 0de2a5c ("tcp: avoid atomic operations on sk->sk_rmem_alloc")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20250331075946.31960-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent dd07df9 commit f278b6d
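
The race the commit message describes is a plain lost update: the reverted helper updated sk->sk_rmem_alloc with a non-atomic read-modify-write (atomic_read() followed by atomic_set()), which is only safe if every writer holds the socket lock, while sock_queue_err_skb() performs an atomic_add() on the same counter without that lock. The userspace sketch below is illustrative only, not kernel code; rmem_alloc, queue_data() and queue_error() are made-up stand-ins for sk->sk_rmem_alloc, the locked receive path and sock_queue_err_skb(). It reproduces the same pattern with C11 atomics to show how an increment from the "error queue" thread can be lost.

/* Illustrative userspace sketch of the lost-update race described in the
 * commit message. NOT kernel code: rmem_alloc, queue_data() and
 * queue_error() are hypothetical stand-ins for sk->sk_rmem_alloc, the
 * locked receive path and sock_queue_err_skb().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERATIONS 1000000

static atomic_int rmem_alloc;	/* plays the role of sk->sk_rmem_alloc */

/* Receive path "under the socket lock": the reverted code replaced
 * atomic_add() with a non-atomic read-modify-write, assuming it was
 * the only writer. */
static void *queue_data(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERATIONS; i++)
		atomic_store(&rmem_alloc, atomic_load(&rmem_alloc) + 1);
	return NULL;
}

/* Error-queue path: runs without the socket lock and uses a real
 * atomic increment, like sock_queue_err_skb() does. */
static void *queue_error(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERATIONS; i++)
		atomic_fetch_add(&rmem_alloc, 1);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, queue_data, NULL);
	pthread_create(&b, NULL, queue_error, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Expected 2 * ITERATIONS; typically prints less, because
	 * increments from queue_error() are overwritten by a stale
	 * store in queue_data(). */
	printf("rmem_alloc = %d (expected %d)\n",
	       atomic_load(&rmem_alloc), 2 * ITERATIONS);
	return 0;
}

Reverting to skb_set_owner_r()/sock_rfree() makes every update to sk->sk_rmem_alloc a true atomic read-modify-write again, so concurrent additions from the error-queue path can no longer be lost.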

File tree

4 files changed, +6 −35 lines


include/net/tcp.h

Lines changed: 0 additions & 15 deletions
@@ -779,7 +779,6 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 
 /* tcp.c */
 void tcp_get_info(struct sock *, struct tcp_info *);
-void tcp_sock_rfree(struct sk_buff *skb);
 
 /* Read 'sendfile()'-style from a TCP socket */
 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
@@ -2899,18 +2898,4 @@ enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
 			     const void *saddr, const void *daddr,
 			     int family, int dif, int sdif);
 
-/* version of skb_set_owner_r() avoiding one atomic_add() */
-static inline void tcp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
-	skb_orphan(skb);
-	skb->sk = sk;
-	skb->destructor = tcp_sock_rfree;
-
-	sock_owned_by_me(sk);
-	atomic_set(&sk->sk_rmem_alloc,
-		   atomic_read(&sk->sk_rmem_alloc) + skb->truesize);
-
-	sk_forward_alloc_add(sk, -skb->truesize);
-}
-
 #endif /* _TCP_H */

net/ipv4/tcp.c

Lines changed: 2 additions & 16 deletions
@@ -1525,25 +1525,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 	__tcp_cleanup_rbuf(sk, copied);
 }
 
-/* private version of sock_rfree() avoiding one atomic_sub() */
-void tcp_sock_rfree(struct sk_buff *skb)
-{
-	struct sock *sk = skb->sk;
-	unsigned int len = skb->truesize;
-
-	sock_owned_by_me(sk);
-	atomic_set(&sk->sk_rmem_alloc,
-		   atomic_read(&sk->sk_rmem_alloc) - len);
-
-	sk_forward_alloc_add(sk, len);
-	sk_mem_reclaim(sk);
-}
-
 static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
-	if (likely(skb->destructor == tcp_sock_rfree)) {
-		tcp_sock_rfree(skb);
+	if (likely(skb->destructor == sock_rfree)) {
+		sock_rfree(skb);
 		skb->destructor = NULL;
 		skb->sk = NULL;
 		return skb_attempt_defer_free(skb);

net/ipv4/tcp_fastopen.c

Lines changed: 1 addition & 1 deletion
@@ -189,7 +189,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
 	tcp_segs_in(tp, skb);
 	__skb_pull(skb, tcp_hdrlen(skb));
 	sk_forced_mem_schedule(sk, skb->truesize);
-	tcp_skb_set_owner_r(skb, sk);
+	skb_set_owner_r(skb, sk);
 
 	TCP_SKB_CB(skb)->seq++;
 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

net/ipv4/tcp_input.c

Lines changed: 3 additions & 3 deletions
@@ -5171,7 +5171,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (tcp_is_sack(tp))
 			tcp_grow_window(sk, skb, false);
 		skb_condense(skb);
-		tcp_skb_set_owner_r(skb, sk);
+		skb_set_owner_r(skb, sk);
 	}
 }
 
@@ -5187,7 +5187,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
 	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
 	if (!eaten) {
 		tcp_add_receive_queue(sk, skb);
-		tcp_skb_set_owner_r(skb, sk);
+		skb_set_owner_r(skb, sk);
 	}
 	return eaten;
 }
@@ -5504,7 +5504,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
 			__skb_queue_before(list, skb, nskb);
 		else
 			__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
-		tcp_skb_set_owner_r(nskb, sk);
+		skb_set_owner_r(nskb, sk);
 		mptcp_skb_ext_move(nskb, skb);
 
 		/* Copy data, releasing collapsed skbs. */
