 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
+#include <linux/completion.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>
@@ -73,6 +74,7 @@ struct bpf_cpu_map_entry {
 	struct rcu_head rcu;
 
 	struct work_struct kthread_stop_wq;
+	struct completion kthread_running;
 };
 
 struct bpf_cpu_map {
@@ -129,11 +131,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
 	 * gracefully and warn once.
 	 */
-	struct xdp_frame *xdpf;
+	void *ptr;
 
-	while ((xdpf = ptr_ring_consume(ring)))
-		if (WARN_ON_ONCE(xdpf))
-			xdp_return_frame(xdpf);
+	while ((ptr = ptr_ring_consume(ring))) {
+		WARN_ON_ONCE(1);
+		if (unlikely(__ptr_test_bit(0, &ptr))) {
+			__ptr_clear_bit(0, &ptr);
+			kfree_skb(ptr);
+			continue;
+		}
+		xdp_return_frame(ptr);
+	}
 }
 
 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
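The cleanup path above can only tell an sk_buff apart from an xdp_frame because the producer marks skb pointers by setting their lowest bit before they enter the shared ptr_ring (both objects are at least word-aligned, so bit 0 is otherwise always zero); the __ptr_test_bit()/__ptr_clear_bit() calls in the hunk undo that mark on the consumer side. A minimal user-space sketch of the same low-bit tagging convention, with invented stand-in types and hypothetical tag_skb()/drain_one() helpers, might look like this:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for struct sk_buff / struct xdp_frame. */
struct fake_skb   { int id; };
struct fake_frame { int id; };

/* Producer side: set bit 0 of an skb pointer before it goes into the
 * shared ring. Both structs are at least pointer-aligned, so a real
 * address never uses bit 0.
 */
static void *tag_skb(struct fake_skb *skb)
{
	return (void *)((uintptr_t)skb | 1);
}

/* Consumer side: test and clear bit 0 to recover the typed pointer. */
static void drain_one(void *ptr)
{
	if ((uintptr_t)ptr & 1) {
		struct fake_skb *skb = (void *)((uintptr_t)ptr & ~(uintptr_t)1);
		printf("freeing skb %d\n", skb->id);       /* kfree_skb() in the kernel */
	} else {
		struct fake_frame *frame = ptr;
		printf("returning frame %d\n", frame->id); /* xdp_return_frame() in the kernel */
	}
}

int main(void)
{
	struct fake_skb skb = { .id = 1 };
	struct fake_frame frame = { .id = 2 };

	assert(((uintptr_t)&skb & 1) == 0); /* alignment assumption */

	drain_one(tag_skb(&skb)); /* tagged pointer: treated as an skb */
	drain_one(&frame);        /* plain pointer: treated as a frame */
	return 0;
}

Tagging the pointer itself avoids a second ring or a per-entry type field, at the cost of requiring the consumer to strip the bit before dereferencing.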
@@ -153,7 +161,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 static void cpu_map_kthread_stop(struct work_struct *work)
 {
 	struct bpf_cpu_map_entry *rcpu;
-	int err;
 
 	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
 
@@ -163,14 +170,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 	rcu_barrier();
 
 	/* kthread_stop will wake_up_process and wait for it to complete */
-	err = kthread_stop(rcpu->kthread);
-	if (err) {
-		/* kthread_stop may be called before cpu_map_kthread_run
-		 * is executed, so we need to release the memory related
-		 * to rcpu.
-		 */
-		put_cpu_map_entry(rcpu);
-	}
+	kthread_stop(rcpu->kthread);
 }
 
 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
@@ -298,11 +298,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 	return nframes;
 }
 
-
 static int cpu_map_kthread_run(void *data)
 {
 	struct bpf_cpu_map_entry *rcpu = data;
 
+	complete(&rcpu->kthread_running);
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	/* When kthread gives stop order, then rcpu have been disconnected
@@ -467,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 		goto free_ptr_ring;
 
 	/* Setup kthread */
+	init_completion(&rcpu->kthread_running);
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
 					       "cpumap/%d/map:%d", cpu,
 					       map->id);
@@ -480,6 +481,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 	kthread_bind(rcpu->kthread, cpu);
 	wake_up_process(rcpu->kthread);
 
+	/* Make sure kthread has been running, so kthread_stop() will not
+	 * stop the kthread prematurely and all pending frames or skbs
+	 * will be handled by the kthread before kthread_stop() returns.
+	 */
+	wait_for_completion(&rcpu->kthread_running);
+
 	return rcpu;
 
 free_prog:
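Taken together, the init_completion(), complete() and wait_for_completion() calls added across the hunks above form a startup handshake: __cpu_map_entry_alloc() does not return until cpu_map_kthread_run() has actually started executing, so a later kthread_stop() is guaranteed to act on a running thread, and the error path removed from cpu_map_kthread_stop() is no longer needed. A stripped-down kernel-module sketch of the same handshake, with invented demo_* names rather than the real cpumap code, could look roughly like this:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

struct demo_worker {
	struct task_struct *kthread;
	struct completion kthread_running;
};

static struct demo_worker demo;

static int demo_kthread_run(void *data)
{
	struct demo_worker *w = data;

	/* Signal the creator before doing anything else, mirroring
	 * cpu_map_kthread_run(): once this fires, kthread_stop() can
	 * only ever see a thread that has really started running.
	 */
	complete(&w->kthread_running);

	while (!kthread_should_stop()) {
		/* drain queues / do real work here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static int __init demo_init(void)
{
	init_completion(&demo.kthread_running);

	demo.kthread = kthread_create(demo_kthread_run, &demo, "demo_worker");
	if (IS_ERR(demo.kthread))
		return PTR_ERR(demo.kthread);

	wake_up_process(demo.kthread);

	/* Block until the thread has entered its function, like the
	 * wait_for_completion() added in __cpu_map_entry_alloc().
	 */
	wait_for_completion(&demo.kthread_running);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Safe without extra error handling: the thread is known to be
	 * running, so kthread_stop() will wake it and wait for it to exit.
	 */
	kthread_stop(demo.kthread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The key detail is that complete() is the first statement of the thread function, so the waiter can only proceed once the thread has been scheduled at least once.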