
Commit 4c9fbff

Author: Martin KaFai Lau (committed)

Merge branch 'Two fixes for cpu-map'

Hou Tao says:

====================
The patchset fixes two reported warnings in cpu-map when running
xdp_redirect_cpu and some RT threads concurrently.

Patch #1 fixes the warning in __cpu_map_ring_cleanup() when the kthread
is stopped prematurely. Patch #2 fixes the warning in __xdp_return()
when there are pending skbs in the ptr_ring.

Please see the individual patches for more details. Comments are always
welcome.
====================

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

2 parents: bcc29b7 + 7c62b75 (commit 4c9fbff)
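
For readers who want to see the shape of the Patch #1 fix outside the kernel, below is a minimal userspace sketch of the handshake it introduces, with pthreads and a POSIX semaphore standing in for the kthread and struct completion APIs. The worker/semaphore names here are illustrative only and do not appear in the patch; the point demonstrated is that the creator blocks until the worker has provably started, so a later stop can never race with a worker that has not yet run.

/*
 * Userspace sketch (not kernel code) of the startup handshake added by
 * Patch #1: the worker signals a completion as soon as it starts running,
 * and the creator waits for it before anyone is allowed to stop the
 * worker.  sem_t stands in for struct completion; pthread_create()/
 * pthread_join() stand in for kthread_create_on_node()/kthread_stop().
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct worker {
	pthread_t thread;
	sem_t running;		/* plays the role of kthread_running */
	atomic_bool stop;	/* plays the role of kthread_should_stop() */
};

static void *worker_run(void *data)
{
	struct worker *w = data;

	/* Mirror complete(&rcpu->kthread_running) at the top of
	 * cpu_map_kthread_run(): announce "I am running" before any work.
	 */
	sem_post(&w->running);

	while (!atomic_load(&w->stop))
		;	/* drain queued frames/skbs here */
	return NULL;
}

static int worker_start(struct worker *w)
{
	sem_init(&w->running, 0, 0);		/* init_completion() analogue */
	atomic_init(&w->stop, false);
	if (pthread_create(&w->thread, NULL, worker_run, w))
		return -1;

	/* wait_for_completion() analogue: do not hand the worker out until
	 * it is provably running, so a later stop cannot hit a worker that
	 * never started and leave queued entries behind.
	 */
	sem_wait(&w->running);
	return 0;
}

static void worker_stop(struct worker *w)
{
	atomic_store(&w->stop, true);		/* kthread_stop() analogue */
	pthread_join(w->thread, NULL);
	sem_destroy(&w->running);
}

int main(void)
{
	struct worker w;

	if (worker_start(&w))
		return 1;
	worker_stop(&w);
	puts("worker started and stopped cleanly");
	return 0;
}

It should build with a plain cc -pthread; it is a sketch of the synchronization pattern, not of cpumap itself.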

File tree: 1 file changed (+21, -14 lines changed)


kernel/bpf/cpumap.c (21 additions, 14 deletions)

--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
+#include <linux/completion.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>

@@ -73,6 +74,7 @@ struct bpf_cpu_map_entry {
 	struct rcu_head rcu;

 	struct work_struct kthread_stop_wq;
+	struct completion kthread_running;
 };

 struct bpf_cpu_map {
@@ -129,11 +131,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
 	 * gracefully and warn once.
 	 */
-	struct xdp_frame *xdpf;
+	void *ptr;

-	while ((xdpf = ptr_ring_consume(ring)))
-		if (WARN_ON_ONCE(xdpf))
-			xdp_return_frame(xdpf);
+	while ((ptr = ptr_ring_consume(ring))) {
+		WARN_ON_ONCE(1);
+		if (unlikely(__ptr_test_bit(0, &ptr))) {
+			__ptr_clear_bit(0, &ptr);
+			kfree_skb(ptr);
+			continue;
+		}
+		xdp_return_frame(ptr);
+	}
 }

 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
@@ -153,7 +161,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 static void cpu_map_kthread_stop(struct work_struct *work)
 {
 	struct bpf_cpu_map_entry *rcpu;
-	int err;

 	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

@@ -163,14 +170,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 	rcu_barrier();

 	/* kthread_stop will wake_up_process and wait for it to complete */
-	err = kthread_stop(rcpu->kthread);
-	if (err) {
-		/* kthread_stop may be called before cpu_map_kthread_run
-		 * is executed, so we need to release the memory related
-		 * to rcpu.
-		 */
-		put_cpu_map_entry(rcpu);
-	}
+	kthread_stop(rcpu->kthread);
 }

 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
@@ -298,11 +298,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 	return nframes;
 }

-
 static int cpu_map_kthread_run(void *data)
 {
 	struct bpf_cpu_map_entry *rcpu = data;

+	complete(&rcpu->kthread_running);
 	set_current_state(TASK_INTERRUPTIBLE);

 	/* When kthread gives stop order, then rcpu have been disconnected
@@ -467,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 		goto free_ptr_ring;

 	/* Setup kthread */
+	init_completion(&rcpu->kthread_running);
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
 					       "cpumap/%d/map:%d", cpu,
 					       map->id);
@@ -480,6 +481,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 	kthread_bind(rcpu->kthread, cpu);
 	wake_up_process(rcpu->kthread);

+	/* Make sure kthread has been running, so kthread_stop() will not
+	 * stop the kthread prematurely and all pending frames or skbs
+	 * will be handled by the kthread before kthread_stop() returns.
+	 */
+	wait_for_completion(&rcpu->kthread_running);
+
 	return rcpu;

 free_prog:
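
The reworked __cpu_map_ring_cleanup() above relies on an existing cpumap convention: skbs queued on the ptr_ring carry a tag in bit 0 of the pointer, while xdp_frames do not, so the cleanup path can free each pending entry with the matching destructor. Below is a small userspace sketch of that low-bit pointer-tagging idea; the struct names and helpers are made up for illustration and are not the kernel's __ptr_test_bit()/__ptr_clear_bit(), but the bit manipulation is the same.

/*
 * Userspace sketch of low-bit pointer tagging as used by cpumap to tell
 * queued skbs apart from xdp_frames on the same ptr_ring.  Heap
 * allocations are at least pointer-aligned, so bit 0 is free to carry a
 * one-bit type tag.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct frame { int id; };	/* stand-in for struct xdp_frame */
struct skb   { int id; };	/* stand-in for struct sk_buff */

static void *tag_skb(struct skb *skb)
{
	return (void *)((uintptr_t)skb | 1UL);	/* set bit 0: "this is an skb" */
}

static int ptr_is_skb(void *ptr)
{
	return ((uintptr_t)ptr & 1UL) != 0;	/* __ptr_test_bit(0, &ptr) analogue */
}

static struct skb *untag_skb(void *ptr)
{
	return (struct skb *)((uintptr_t)ptr & ~(uintptr_t)1);	/* __ptr_clear_bit() analogue */
}

static void drain(void **ring, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		void *ptr = ring[i];

		if (ptr_is_skb(ptr)) {
			struct skb *skb = untag_skb(ptr);

			printf("freeing pending skb %d\n", skb->id);
			free(skb);			/* kfree_skb() analogue */
			continue;
		}
		printf("returning pending frame %d\n", ((struct frame *)ptr)->id);
		free(ptr);				/* xdp_return_frame() analogue */
	}
}

int main(void)
{
	struct frame *f = malloc(sizeof(*f));
	struct skb *s = malloc(sizeof(*s));
	void *ring[2];

	f->id = 1;
	s->id = 2;
	ring[0] = f;		/* untagged entry: treated as an xdp_frame */
	ring[1] = tag_skb(s);	/* tagged entry: treated as an skb */
	drain(ring, 2);
	return 0;
}

The WARN_ON_ONCE(1) in the real cleanup path is unchanged in spirit: any entry still on the ring at teardown is unexpected, but it is now released with the correct free routine instead of triggering the __xdp_return() warning.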

Comments (0)