
Commit 3932f22

Martin KaFai Lau says:

====================
pull-request: bpf 2023-08-03

We've added 5 non-merge commits during the last 7 day(s) which contain
a total of 3 files changed, 37 insertions(+), 20 deletions(-).

The main changes are:

1) Disable preemption in perf_event_output helpers code, from Jiri Olsa.

2) Add length check for SK_DIAG_BPF_STORAGE_REQ_MAP_FD parsing, from Lin Ma.

3) Multiple warning splat fixes in cpumap, from Hou Tao.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf, cpumap: Handle skb as well when clean up ptr_ring
  bpf, cpumap: Make sure kthread is running before map update returns
  bpf: Add length check for SK_DIAG_BPF_STORAGE_REQ_MAP_FD parsing
  bpf: Disable preemption in bpf_event_output
  bpf: Disable preemption in bpf_perf_event_output
====================

Link: https://lore.kernel.org/r/20230803181429.994607-1-martin.lau@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents: 0d48a84 + 4c9fbff
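
One of the cpumap fixes below ("Make sure kthread is running before map update returns") makes __cpu_map_entry_alloc() block until its kthread has actually started, using a struct completion. As a rough userspace analogy (not kernel code; all names here are invented for illustration), the same start-up handshake looks like this with POSIX threads:

/* Userspace sketch of the "do not return until the worker has really started"
 * handshake that the cpumap change implements with init_completion(),
 * complete() and wait_for_completion(). All names here are invented for
 * illustration; this is not kernel code.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct worker {
        sem_t     running;      /* stands in for struct completion kthread_running */
        pthread_t thread;
};

static void *worker_run(void *data)
{
        struct worker *w = data;

        sem_post(&w->running);  /* like complete(&rcpu->kthread_running) */
        /* ... the main processing loop would start here ... */
        return NULL;
}

int main(void)
{
        struct worker w;

        sem_init(&w.running, 0, 0);                  /* like init_completion() */
        pthread_create(&w.thread, NULL, worker_run, &w);

        /* Do not report success before the worker runs, so that a later
         * "stop" request cannot race with a worker that never started.
         */
        sem_wait(&w.running);                        /* like wait_for_completion() */
        puts("worker is running");

        pthread_join(w.thread, NULL);
        sem_destroy(&w.running);
        return 0;
}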

File tree: 3 files changed (+37, -20 lines)

kernel/bpf/cpumap.c

Lines changed: 21 additions & 14 deletions
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
+#include <linux/completion.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>
 
@@ -73,6 +74,7 @@ struct bpf_cpu_map_entry {
         struct rcu_head rcu;
 
         struct work_struct kthread_stop_wq;
+        struct completion kthread_running;
 };
 
 struct bpf_cpu_map {
@@ -129,11 +131,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
          * invoked cpu_map_kthread_stop(). Catch any broken behaviour
          * gracefully and warn once.
          */
-        struct xdp_frame *xdpf;
+        void *ptr;
 
-        while ((xdpf = ptr_ring_consume(ring)))
-                if (WARN_ON_ONCE(xdpf))
-                        xdp_return_frame(xdpf);
+        while ((ptr = ptr_ring_consume(ring))) {
+                WARN_ON_ONCE(1);
+                if (unlikely(__ptr_test_bit(0, &ptr))) {
+                        __ptr_clear_bit(0, &ptr);
+                        kfree_skb(ptr);
+                        continue;
+                }
+                xdp_return_frame(ptr);
+        }
 }
 
 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
@@ -153,7 +161,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 static void cpu_map_kthread_stop(struct work_struct *work)
 {
         struct bpf_cpu_map_entry *rcpu;
-        int err;
 
         rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
 
@@ -163,14 +170,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
         rcu_barrier();
 
         /* kthread_stop will wake_up_process and wait for it to complete */
-        err = kthread_stop(rcpu->kthread);
-        if (err) {
-                /* kthread_stop may be called before cpu_map_kthread_run
-                 * is executed, so we need to release the memory related
-                 * to rcpu.
-                 */
-                put_cpu_map_entry(rcpu);
-        }
+        kthread_stop(rcpu->kthread);
 }
 
 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
@@ -298,11 +298,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
         return nframes;
 }
 
-
 static int cpu_map_kthread_run(void *data)
 {
         struct bpf_cpu_map_entry *rcpu = data;
 
+        complete(&rcpu->kthread_running);
         set_current_state(TASK_INTERRUPTIBLE);
 
         /* When kthread gives stop order, then rcpu have been disconnected
@@ -467,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
                 goto free_ptr_ring;
 
         /* Setup kthread */
+        init_completion(&rcpu->kthread_running);
         rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
                                                "cpumap/%d/map:%d", cpu,
                                                map->id);
@@ -480,6 +481,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
         kthread_bind(rcpu->kthread, cpu);
         wake_up_process(rcpu->kthread);
 
+        /* Make sure kthread has been running, so kthread_stop() will not
+         * stop the kthread prematurely and all pending frames or skbs
+         * will be handled by the kthread before kthread_stop() returns.
+         */
+        wait_for_completion(&rcpu->kthread_running);
+
         return rcpu;
 
 free_prog:
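
The rewritten __cpu_map_ring_cleanup() above tells skbs apart from xdp_frames by a tag in bit 0 of the queued pointer (__ptr_test_bit()/__ptr_clear_bit()); the producer side is expected to set that bit when it enqueues an skb. A minimal, self-contained userspace sketch of this low-bit pointer-tagging convention (illustrative only; the types and helpers are made-up stand-ins, not the kernel's):

/* Minimal userspace sketch (not kernel code) of the low-bit pointer-tagging
 * convention the cleanup relies on: skb pointers are queued with bit 0 set,
 * xdp_frame pointers with bit 0 clear, so a consumer can tell the two apart
 * from a plain void *.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_skb   { int id; };  /* stand-in for struct sk_buff   */
struct fake_frame { int id; };  /* stand-in for struct xdp_frame */

static void *tag_skb(struct fake_skb *skb)
{
        /* Objects are at least word aligned, so bit 0 of the address is free. */
        return (void *)((uintptr_t)skb | 1UL);
}

static int ptr_is_skb(void *ptr)
{
        return (uintptr_t)ptr & 1UL;                /* like __ptr_test_bit(0, &ptr)  */
}

static void *ptr_untag(void *ptr)
{
        return (void *)((uintptr_t)ptr & ~1UL);     /* like __ptr_clear_bit(0, &ptr) */
}

int main(void)
{
        struct fake_skb skb = { .id = 1 };
        struct fake_frame frame = { .id = 2 };
        void *ring[2] = { tag_skb(&skb), &frame };

        for (int i = 0; i < 2; i++) {
                void *ptr = ring[i];

                if (ptr_is_skb(ptr))
                        printf("entry %d: skb   id=%d\n", i,
                               ((struct fake_skb *)ptr_untag(ptr))->id);
                else
                        printf("entry %d: frame id=%d\n", i,
                               ((struct fake_frame *)ptr)->id);
        }
        return 0;
}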

kernel/trace/bpf_trace.c

Lines changed: 12 additions & 5 deletions
@@ -661,16 +661,19 @@ static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
            u64, flags, void *, data, u64, size)
 {
-        struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
-        int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
+        struct bpf_trace_sample_data *sds;
         struct perf_raw_record raw = {
                 .frag = {
                         .size = size,
                         .data = data,
                 },
         };
         struct perf_sample_data *sd;
-        int err;
+        int nest_level, err;
+
+        preempt_disable();
+        sds = this_cpu_ptr(&bpf_trace_sds);
+        nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 
         if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
                 err = -EBUSY;
@@ -688,9 +691,9 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
         perf_sample_save_raw_data(sd, &raw);
 
         err = __bpf_perf_event_output(regs, map, flags, sd);
-
 out:
         this_cpu_dec(bpf_trace_nest_level);
+        preempt_enable();
         return err;
 }
 
@@ -715,7 +718,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                      void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
-        int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
         struct perf_raw_frag frag = {
                 .copy = ctx_copy,
                 .size = ctx_size,
@@ -732,8 +734,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
         };
         struct perf_sample_data *sd;
         struct pt_regs *regs;
+        int nest_level;
         u64 ret;
 
+        preempt_disable();
+        nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
+
         if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
                 ret = -EBUSY;
                 goto out;
@@ -748,6 +754,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
         ret = __bpf_perf_event_output(regs, map, flags, sd);
 out:
         this_cpu_dec(bpf_event_output_nest_level);
+        preempt_enable();
         return ret;
 }
 
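
The pattern both helpers now follow is: disable preemption first, then take the per-CPU nest level and per-CPU sample buffers, and only re-enable preemption after the matching decrement, so that the increment, the buffer use and the decrement all land on the same CPU's data; otherwise the nest-level accounting can get out of sync. A small userspace illustration of the same idea (illustrative only; an array indexed by sched_getcpu() stands in for the kernel's per-CPU counter):

/* Userspace illustration (not kernel code) of why bpf_perf_event_output() and
 * bpf_event_output() now wrap their per-CPU bookkeeping in preempt_disable()/
 * preempt_enable(): the increment, the buffer use and the decrement must all
 * hit the same per-CPU slot. The kernel's this_cpu_*() helpers always act on
 * whatever CPU the task currently runs on, so the slot is only stable if
 * migration is prevented; this sketch fakes that by reading the CPU number
 * once and keeping it in a local variable.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS 4096
static int nest_level[MAX_CPUS];        /* stand-in for a per-CPU counter */

static void helper_body(void)
{
        int cpu = sched_getcpu();       /* "pin" the slot for this section */

        nest_level[cpu]++;              /* like this_cpu_inc_return()      */
        /* ... fill and emit the sample buffer that belongs to `cpu` ...   */
        nest_level[cpu]--;              /* like this_cpu_dec()             */
}

int main(void)
{
        helper_body();
        printf("nest level on cpu %d is back to %d\n",
               sched_getcpu(), nest_level[sched_getcpu()]);
        return 0;
}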

net/core/bpf_sk_storage.c

Lines changed: 4 additions & 1 deletion
@@ -496,8 +496,11 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
                 return ERR_PTR(-EPERM);
 
         nla_for_each_nested(nla, nla_stgs, rem) {
-                if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
+                if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) {
+                        if (nla_len(nla) != sizeof(u32))
+                                return ERR_PTR(-EINVAL);
                         nr_maps++;
+                }
         }
 
         diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
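
The check above rejects SK_DIAG_BPF_STORAGE_REQ_MAP_FD attributes whose payload is not exactly 4 bytes, since the attribute's value is later interpreted as a u32 map FD. A small userspace sketch of the same validate-length-before-read pattern (simplified stand-ins, not the kernel's struct nlattr or nla_*() helpers):

/* Userspace sketch (not kernel code) of the kind of check the
 * bpf_sk_storage.c fix adds: before interpreting an attribute's payload as a
 * u32 map FD, verify the attribute actually carries 4 bytes of payload.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attr {
        uint16_t len;                   /* payload length in bytes */
        uint16_t type;
        const unsigned char *data;
};

static int get_map_fd(const struct attr *a, uint32_t *fd)
{
        if (a->len != sizeof(uint32_t))
                return -1;                  /* reject truncated/oversized attr */
        memcpy(fd, a->data, sizeof(*fd));   /* safe: exactly 4 bytes present   */
        return 0;
}

int main(void)
{
        unsigned char ok_payload[4] = { 0x05, 0x00, 0x00, 0x00 };
        unsigned char short_payload[2] = { 0x05, 0x00 };
        struct attr ok = { .len = 4, .type = 1, .data = ok_payload };
        struct attr bad = { .len = 2, .type = 1, .data = short_payload };
        uint32_t fd = 0;

        printf("ok attr:  %s (fd=%u)\n",
               get_map_fd(&ok, &fd) ? "rejected" : "accepted", fd);
        printf("bad attr: %s\n",
               get_map_fd(&bad, &fd) ? "rejected" : "accepted");
        return 0;
}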
