
Commit 8c4d04f

edumazet authored and kuba-moo committed
tcp_metrics: annotate data-races around tm->tcpm_vals[]
tm->tcpm_vals[] values can be read or written locklessly.

Add needed READ_ONCE()/WRITE_ONCE() to document this, and force use of tcp_metric_get() and tcp_metric_set().

Fixes: 51c5d0c ("tcp: Maintain dynamic metrics in local cache.")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent 285ce11 commit 8c4d04f
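For readers unfamiliar with the pattern, the sketch below shows the shape of the fix in isolation: a value that may be read and written locklessly gets one annotated getter/setter pair, so every access is documented for tools like KCSAN and the compiler cannot tear, fuse, or re-load the accesses. This is a minimal userspace sketch, not the kernel's tcp_metrics code: READ_ONCE()/WRITE_ONCE() here are simplified stand-ins for the kernel macros, and struct metrics_cache, metric_get() and metric_set() are invented names for illustration.

/* Minimal sketch of the accessor pattern this commit enforces.
 * READ_ONCE()/WRITE_ONCE() below are simplified stand-ins for the
 * kernel macros (volatile accesses that keep the compiler from
 * tearing or caching the load/store). All names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

enum metric_index { METRIC_RTT, METRIC_RTTVAR, METRIC_CWND, METRIC_MAX };

struct metrics_cache {
        uint32_t vals[METRIC_MAX];      /* read/written locklessly */
};

/* All lockless readers go through one annotated accessor... */
static uint32_t metric_get(const struct metrics_cache *mc,
                           enum metric_index idx)
{
        /* Paired with WRITE_ONCE() in metric_set() */
        return READ_ONCE(mc->vals[idx]);
}

/* ...and all writers through its counterpart. */
static void metric_set(struct metrics_cache *mc, enum metric_index idx,
                       uint32_t val)
{
        /* Paired with READ_ONCE() in metric_get() */
        WRITE_ONCE(mc->vals[idx], val);
}

int main(void)
{
        struct metrics_cache mc = { { 0 } };

        metric_set(&mc, METRIC_RTT, 250 * 1000);        /* 250 ms in usec */
        printf("rtt=%u usec\n", metric_get(&mc, METRIC_RTT));
        return 0;
}

Routing every access through the helpers, rather than sprinkling READ_ONCE()/WRITE_ONCE() at each call site, is what the commit means by "force use of tcp_metric_get() and tcp_metric_set()": future callers such as tcpm_suck_dst() stay correctly annotated by construction.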

File tree

1 file changed (+14, -9 lines)

net/ipv4/tcp_metrics.c

Lines changed: 14 additions & 9 deletions
@@ -63,17 +63,19 @@ static bool tcp_metric_locked(struct tcp_metrics_block *tm,
 	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
 }
 
-static u32 tcp_metric_get(struct tcp_metrics_block *tm,
+static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
 			  enum tcp_metric_index idx)
 {
-	return tm->tcpm_vals[idx];
+	/* Paired with WRITE_ONCE() in tcp_metric_set() */
+	return READ_ONCE(tm->tcpm_vals[idx]);
 }
 
 static void tcp_metric_set(struct tcp_metrics_block *tm,
 			   enum tcp_metric_index idx,
 			   u32 val)
 {
-	tm->tcpm_vals[idx] = val;
+	/* Paired with READ_ONCE() in tcp_metric_get() */
+	WRITE_ONCE(tm->tcpm_vals[idx], val);
 }
 
 static bool addr_same(const struct inetpeer_addr *a,
@@ -115,13 +117,16 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
 	WRITE_ONCE(tm->tcpm_lock, val);
 
 	msval = dst_metric_raw(dst, RTAX_RTT);
-	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
+	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
 
 	msval = dst_metric_raw(dst, RTAX_RTTVAR);
-	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
-	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
-	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
-	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
+	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
+	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+		       dst_metric_raw(dst, RTAX_SSTHRESH));
+	tcp_metric_set(tm, TCP_METRIC_CWND,
+		       dst_metric_raw(dst, RTAX_CWND));
+	tcp_metric_set(tm, TCP_METRIC_REORDERING,
+		       dst_metric_raw(dst, RTAX_REORDERING));
 	if (fastopen_clear) {
 		tm->tcpm_fastopen.mss = 0;
 		tm->tcpm_fastopen.syn_loss = 0;
@@ -667,7 +672,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
 		if (!nest)
 			goto nla_put_failure;
 		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
-			u32 val = tm->tcpm_vals[i];
+			u32 val = tcp_metric_get(tm, i);
 
 			if (!val)
 				continue;
