Skip to content

Commit 35f301d

Browse files
committed
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Daniel Borkmann: - Fix a bug in the BPF verifier to track changes to packet data property for global functions (Eduard Zingerman) - Fix a theoretical BPF prog_array use-after-free in RCU handling of __uprobe_perf_func (Jann Horn) - Fix BPF tracing to have an explicit list of tracepoints and their arguments which need to be annotated as PTR_MAYBE_NULL (Kumar Kartikeya Dwivedi) - Fix a logic bug in the bpf_remove_insns code where a potential error would have been wrongly propagated (Anton Protopopov) - Avoid deadlock scenarios caused by nested kprobe and fentry BPF programs (Priya Bala Govindasamy) - Fix a bug in BPF verifier which was missing a size check for BTF-based context access (Kumar Kartikeya Dwivedi) - Fix a crash found by syzbot through an invalid BPF prog_array access in perf_event_detach_bpf_prog (Jiri Olsa) - Fix several BPF sockmap bugs including a race causing a refcount imbalance upon element replace (Michal Luczaj) - Fix a use-after-free from mismatching BPF program/attachment RCU flavors (Jann Horn) * tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (23 commits) bpf: Avoid deadlock caused by nested kprobe and fentry bpf programs selftests/bpf: Add tests for raw_tp NULL args bpf: Augment raw_tp arguments with PTR_MAYBE_NULL bpf: Revert "bpf: Mark raw_tp arguments with PTR_MAYBE_NULL" selftests/bpf: Add test for narrow ctx load for pointer args bpf: Check size for BTF-based ctx access of pointer members selftests/bpf: extend changes_pkt_data with cases w/o subprograms bpf: fix null dereference when computing changes_pkt_data of prog w/o subprogs bpf: Fix theoretical prog_array UAF in __uprobe_perf_func() bpf: fix potential error return selftests/bpf: validate that tail call invalidates packet pointers bpf: consider that tail calls invalidate packet pointers selftests/bpf: freplace tests for tracking of changes_packet_data bpf: check changes_pkt_data property for extension programs selftests/bpf: test for changing packet data from global functions bpf: track changes_pkt_data property for global functions bpf: refactor bpf_helper_changes_pkt_data to use helper number bpf: add find_containing_subprog() utility function bpf,perf: Fix invalid prog_array access in perf_event_detach_bpf_prog bpf: Fix UAF via mismatching bpf_prog/attachment RCU flavors ...
2 parents a0e3919 + c83508d commit 35f301d

23 files changed

+596
-164
lines changed

include/linux/bpf.h

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1527,6 +1527,7 @@ struct bpf_prog_aux {
15271527
bool is_extended; /* true if extended by freplace program */
15281528
bool jits_use_priv_stack;
15291529
bool priv_stack_requested;
1530+
bool changes_pkt_data;
15301531
u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
15311532
struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
15321533
struct bpf_arena *arena;
@@ -2193,26 +2194,25 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
21932194
* rcu-protected dynamically sized maps.
21942195
*/
21952196
static __always_inline u32
2196-
bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
2197+
bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
21972198
const void *ctx, bpf_prog_run_fn run_prog)
21982199
{
21992200
const struct bpf_prog_array_item *item;
22002201
const struct bpf_prog *prog;
2201-
const struct bpf_prog_array *array;
22022202
struct bpf_run_ctx *old_run_ctx;
22032203
struct bpf_trace_run_ctx run_ctx;
22042204
u32 ret = 1;
22052205

22062206
might_fault();
2207+
RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
2208+
2209+
if (unlikely(!array))
2210+
return ret;
22072211

2208-
rcu_read_lock_trace();
22092212
migrate_disable();
22102213

22112214
run_ctx.is_uprobe = true;
22122215

2213-
array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
2214-
if (unlikely(!array))
2215-
goto out;
22162216
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
22172217
item = &array->items[0];
22182218
while ((prog = READ_ONCE(item->prog))) {
@@ -2227,9 +2227,7 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
22272227
rcu_read_unlock();
22282228
}
22292229
bpf_reset_run_ctx(old_run_ctx);
2230-
out:
22312230
migrate_enable();
2232-
rcu_read_unlock_trace();
22332231
return ret;
22342232
}
22352233

@@ -3516,10 +3514,4 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
35163514
return prog->aux->func_idx != 0;
35173515
}
35183516

3519-
static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog)
3520-
{
3521-
return prog->type == BPF_PROG_TYPE_TRACING &&
3522-
prog->expected_attach_type == BPF_TRACE_RAW_TP;
3523-
}
3524-
35253517
#endif /* _LINUX_BPF_H */

include/linux/bpf_verifier.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -659,6 +659,7 @@ struct bpf_subprog_info {
659659
bool args_cached: 1;
660660
/* true if bpf_fastcall stack region is used by functions that can't be inlined */
661661
bool keep_fastcall_stack: 1;
662+
bool changes_pkt_data: 1;
662663

663664
enum priv_stack_mode priv_stack_mode;
664665
u8 arg_cnt;

include/linux/filter.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1122,7 +1122,7 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
11221122
bool bpf_jit_supports_private_stack(void);
11231123
u64 bpf_arch_uaddress_limit(void);
11241124
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
1125-
bool bpf_helper_changes_pkt_data(void *func);
1125+
bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
11261126

11271127
static inline bool bpf_dump_raw_ok(const struct cred *cred)
11281128
{

kernel/bpf/Makefile

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,3 +53,9 @@ obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
5353
obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
5454
obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
5555
obj-$(CONFIG_BPF_SYSCALL) += kmem_cache_iter.o
56+
57+
CFLAGS_REMOVE_percpu_freelist.o = $(CC_FLAGS_FTRACE)
58+
CFLAGS_REMOVE_bpf_lru_list.o = $(CC_FLAGS_FTRACE)
59+
CFLAGS_REMOVE_queue_stack_maps.o = $(CC_FLAGS_FTRACE)
60+
CFLAGS_REMOVE_lpm_trie.o = $(CC_FLAGS_FTRACE)
61+
CFLAGS_REMOVE_ringbuf.o = $(CC_FLAGS_FTRACE)

kernel/bpf/btf.c

Lines changed: 145 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6439,6 +6439,101 @@ int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
64396439
return off;
64406440
}
64416441

6442+
struct bpf_raw_tp_null_args {
6443+
const char *func;
6444+
u64 mask;
6445+
};
6446+
6447+
static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
6448+
/* sched */
6449+
{ "sched_pi_setprio", 0x10 },
6450+
/* ... from sched_numa_pair_template event class */
6451+
{ "sched_stick_numa", 0x100 },
6452+
{ "sched_swap_numa", 0x100 },
6453+
/* afs */
6454+
{ "afs_make_fs_call", 0x10 },
6455+
{ "afs_make_fs_calli", 0x10 },
6456+
{ "afs_make_fs_call1", 0x10 },
6457+
{ "afs_make_fs_call2", 0x10 },
6458+
{ "afs_protocol_error", 0x1 },
6459+
{ "afs_flock_ev", 0x10 },
6460+
/* cachefiles */
6461+
{ "cachefiles_lookup", 0x1 | 0x200 },
6462+
{ "cachefiles_unlink", 0x1 },
6463+
{ "cachefiles_rename", 0x1 },
6464+
{ "cachefiles_prep_read", 0x1 },
6465+
{ "cachefiles_mark_active", 0x1 },
6466+
{ "cachefiles_mark_failed", 0x1 },
6467+
{ "cachefiles_mark_inactive", 0x1 },
6468+
{ "cachefiles_vfs_error", 0x1 },
6469+
{ "cachefiles_io_error", 0x1 },
6470+
{ "cachefiles_ondemand_open", 0x1 },
6471+
{ "cachefiles_ondemand_copen", 0x1 },
6472+
{ "cachefiles_ondemand_close", 0x1 },
6473+
{ "cachefiles_ondemand_read", 0x1 },
6474+
{ "cachefiles_ondemand_cread", 0x1 },
6475+
{ "cachefiles_ondemand_fd_write", 0x1 },
6476+
{ "cachefiles_ondemand_fd_release", 0x1 },
6477+
/* ext4, from ext4__mballoc event class */
6478+
{ "ext4_mballoc_discard", 0x10 },
6479+
{ "ext4_mballoc_free", 0x10 },
6480+
/* fib */
6481+
{ "fib_table_lookup", 0x100 },
6482+
/* filelock */
6483+
/* ... from filelock_lock event class */
6484+
{ "posix_lock_inode", 0x10 },
6485+
{ "fcntl_setlk", 0x10 },
6486+
{ "locks_remove_posix", 0x10 },
6487+
{ "flock_lock_inode", 0x10 },
6488+
/* ... from filelock_lease event class */
6489+
{ "break_lease_noblock", 0x10 },
6490+
{ "break_lease_block", 0x10 },
6491+
{ "break_lease_unblock", 0x10 },
6492+
{ "generic_delete_lease", 0x10 },
6493+
{ "time_out_leases", 0x10 },
6494+
/* host1x */
6495+
{ "host1x_cdma_push_gather", 0x10000 },
6496+
/* huge_memory */
6497+
{ "mm_khugepaged_scan_pmd", 0x10 },
6498+
{ "mm_collapse_huge_page_isolate", 0x1 },
6499+
{ "mm_khugepaged_scan_file", 0x10 },
6500+
{ "mm_khugepaged_collapse_file", 0x10 },
6501+
/* kmem */
6502+
{ "mm_page_alloc", 0x1 },
6503+
{ "mm_page_pcpu_drain", 0x1 },
6504+
/* .. from mm_page event class */
6505+
{ "mm_page_alloc_zone_locked", 0x1 },
6506+
/* netfs */
6507+
{ "netfs_failure", 0x10 },
6508+
/* power */
6509+
{ "device_pm_callback_start", 0x10 },
6510+
/* qdisc */
6511+
{ "qdisc_dequeue", 0x1000 },
6512+
/* rxrpc */
6513+
{ "rxrpc_recvdata", 0x1 },
6514+
{ "rxrpc_resend", 0x10 },
6515+
/* sunrpc */
6516+
{ "xs_stream_read_data", 0x1 },
6517+
/* ... from xprt_cong_event event class */
6518+
{ "xprt_reserve_cong", 0x10 },
6519+
{ "xprt_release_cong", 0x10 },
6520+
{ "xprt_get_cong", 0x10 },
6521+
{ "xprt_put_cong", 0x10 },
6522+
/* tcp */
6523+
{ "tcp_send_reset", 0x11 },
6524+
/* tegra_apb_dma */
6525+
{ "tegra_dma_tx_status", 0x100 },
6526+
/* timer_migration */
6527+
{ "tmigr_update_events", 0x1 },
6528+
/* writeback, from writeback_folio_template event class */
6529+
{ "writeback_dirty_folio", 0x10 },
6530+
{ "folio_wait_writeback", 0x10 },
6531+
/* rdma */
6532+
{ "mr_integ_alloc", 0x2000 },
6533+
/* bpf_testmod */
6534+
{ "bpf_testmod_test_read", 0x0 },
6535+
};
6536+
64426537
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
64436538
const struct bpf_prog *prog,
64446539
struct bpf_insn_access_aux *info)
@@ -6449,6 +6544,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
64496544
const char *tname = prog->aux->attach_func_name;
64506545
struct bpf_verifier_log *log = info->log;
64516546
const struct btf_param *args;
6547+
bool ptr_err_raw_tp = false;
64526548
const char *tag_value;
64536549
u32 nr_args, arg;
64546550
int i, ret;
@@ -6543,6 +6639,12 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
65436639
return false;
65446640
}
65456641

6642+
if (size != sizeof(u64)) {
6643+
bpf_log(log, "func '%s' size %d must be 8\n",
6644+
tname, size);
6645+
return false;
6646+
}
6647+
65466648
/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
65476649
for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
65486650
const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
@@ -6588,12 +6690,42 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
65886690
if (prog_args_trusted(prog))
65896691
info->reg_type |= PTR_TRUSTED;
65906692

6591-
/* Raw tracepoint arguments always get marked as maybe NULL */
6592-
if (bpf_prog_is_raw_tp(prog))
6593-
info->reg_type |= PTR_MAYBE_NULL;
6594-
else if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
6693+
if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
65956694
info->reg_type |= PTR_MAYBE_NULL;
65966695

6696+
if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
6697+
struct btf *btf = prog->aux->attach_btf;
6698+
const struct btf_type *t;
6699+
const char *tname;
6700+
6701+
/* BTF lookups cannot fail, return false on error */
6702+
t = btf_type_by_id(btf, prog->aux->attach_btf_id);
6703+
if (!t)
6704+
return false;
6705+
tname = btf_name_by_offset(btf, t->name_off);
6706+
if (!tname)
6707+
return false;
6708+
/* Checked by bpf_check_attach_target */
6709+
tname += sizeof("btf_trace_") - 1;
6710+
for (i = 0; i < ARRAY_SIZE(raw_tp_null_args); i++) {
6711+
/* Is this a func with potential NULL args? */
6712+
if (strcmp(tname, raw_tp_null_args[i].func))
6713+
continue;
6714+
if (raw_tp_null_args[i].mask & (0x1 << (arg * 4)))
6715+
info->reg_type |= PTR_MAYBE_NULL;
6716+
/* Is the current arg IS_ERR? */
6717+
if (raw_tp_null_args[i].mask & (0x2 << (arg * 4)))
6718+
ptr_err_raw_tp = true;
6719+
break;
6720+
}
6721+
/* If we don't know NULL-ness specification and the tracepoint
6722+
* is coming from a loadable module, be conservative and mark
6723+
* argument as PTR_MAYBE_NULL.
6724+
*/
6725+
if (i == ARRAY_SIZE(raw_tp_null_args) && btf_is_module(btf))
6726+
info->reg_type |= PTR_MAYBE_NULL;
6727+
}
6728+
65976729
if (tgt_prog) {
65986730
enum bpf_prog_type tgt_type;
65996731

@@ -6638,6 +6770,15 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
66386770
bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
66396771
tname, arg, info->btf_id, btf_type_str(t),
66406772
__btf_name_by_offset(btf, t->name_off));
6773+
6774+
/* Perform all checks on the validity of type for this argument, but if
6775+
* we know it can be IS_ERR at runtime, scrub pointer type and mark as
6776+
* scalar.
6777+
*/
6778+
if (ptr_err_raw_tp) {
6779+
bpf_log(log, "marking pointer arg%d as scalar as it may encode error", arg);
6780+
info->reg_type = SCALAR_VALUE;
6781+
}
66416782
return true;
66426783
}
66436784
EXPORT_SYMBOL_GPL(btf_ctx_access);

kernel/bpf/core.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -539,14 +539,18 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
539539

540540
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
541541
{
542+
int err;
543+
542544
/* Branch offsets can't overflow when program is shrinking, no need
543545
* to call bpf_adj_branches(..., true) here
544546
*/
545547
memmove(prog->insnsi + off, prog->insnsi + off + cnt,
546548
sizeof(struct bpf_insn) * (prog->len - off - cnt));
547549
prog->len -= cnt;
548550

549-
return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
551+
err = bpf_adj_branches(prog, off, off + cnt, off, false);
552+
WARN_ON_ONCE(err);
553+
return err;
550554
}
551555

552556
static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
@@ -2936,7 +2940,7 @@ void __weak bpf_jit_compile(struct bpf_prog *prog)
29362940
{
29372941
}
29382942

2939-
bool __weak bpf_helper_changes_pkt_data(void *func)
2943+
bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
29402944
{
29412945
return false;
29422946
}

0 commit comments

Comments (0)