Skip to content

Commit 76ea955

Browse files
T.J. Mercier authored and Alexei Starovoitov committed
bpf: Add dmabuf iterator
The dmabuf iterator traverses the list of all DMA buffers. DMA buffers are refcounted through their associated struct file. A reference is taken on each buffer as the list is iterated to ensure each buffer persists for the duration of the bpf program execution without holding the list mutex. Signed-off-by: T.J. Mercier <tjmercier@google.com> Reviewed-by: Christian König <christian.koenig@amd.com> Acked-by: Song Liu <song@kernel.org> Link: https://lore.kernel.org/r/20250522230429.941193-3-tjmercier@google.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 89f9dba commit 76ea955

File tree

4 files changed

+175
-0
lines changed

4 files changed

+175
-0
lines changed

drivers/dma-buf/dma-buf.c

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,9 @@
1919
#include <linux/anon_inodes.h>
2020
#include <linux/export.h>
2121
#include <linux/debugfs.h>
22+
#include <linux/list.h>
2223
#include <linux/module.h>
24+
#include <linux/mutex.h>
2325
#include <linux/seq_file.h>
2426
#include <linux/sync_file.h>
2527
#include <linux/poll.h>
@@ -55,6 +57,72 @@ static void __dma_buf_list_del(struct dma_buf *dmabuf)
5557
mutex_unlock(&dmabuf_list_mutex);
5658
}
5759

60+
/**
61+
* dma_buf_iter_begin - begin iteration through global list of all DMA buffers
62+
*
63+
* Returns the first buffer in the global list of DMA-bufs that's not in the
64+
* process of being destroyed. Increments that buffer's reference count to
65+
* prevent buffer destruction. Callers must release the reference, either by
66+
* continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
67+
*
68+
* Return:
69+
* * First buffer from global list, with refcount elevated
70+
* * NULL if no active buffers are present
71+
*/
72+
struct dma_buf *dma_buf_iter_begin(void)
73+
{
74+
struct dma_buf *ret = NULL, *dmabuf;
75+
76+
/*
77+
* The list mutex does not protect a dmabuf's refcount, so it can be
78+
* zeroed while we are iterating. We cannot call get_dma_buf() since the
79+
* caller may not already own a reference to the buffer.
80+
*/
81+
mutex_lock(&dmabuf_list_mutex);
82+
list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
83+
if (file_ref_get(&dmabuf->file->f_ref)) {
84+
ret = dmabuf;
85+
break;
86+
}
87+
}
88+
mutex_unlock(&dmabuf_list_mutex);
89+
return ret;
90+
}
91+
92+
/**
93+
* dma_buf_iter_next - continue iteration through global list of all DMA buffers
94+
* @dmabuf: [in] pointer to dma_buf
95+
*
96+
* Decrements the reference count on the provided buffer. Returns the next
97+
* buffer from the remainder of the global list of DMA-bufs with its reference
98+
* count incremented. Callers must release the reference, either by continuing
99+
* iteration with dma_buf_iter_next(), or with dma_buf_put().
100+
*
101+
* Return:
102+
* * Next buffer from global list, with refcount elevated
103+
* * NULL if no additional active buffers are present
104+
*/
105+
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
106+
{
107+
struct dma_buf *ret = NULL;
108+
109+
/*
110+
* The list mutex does not protect a dmabuf's refcount, so it can be
111+
* zeroed while we are iterating. We cannot call get_dma_buf() since the
112+
* caller may not already own a reference to the buffer.
113+
*/
114+
mutex_lock(&dmabuf_list_mutex);
115+
dma_buf_put(dmabuf);
116+
list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
117+
if (file_ref_get(&dmabuf->file->f_ref)) {
118+
ret = dmabuf;
119+
break;
120+
}
121+
}
122+
mutex_unlock(&dmabuf_list_mutex);
123+
return ret;
124+
}
125+
58126
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
59127
{
60128
struct dma_buf *dmabuf;

include/linux/dma-buf.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -634,4 +634,6 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
634634
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
635635
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
636636
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
637+
/* Refcounted iteration over the global list of DMA buffers. */
struct dma_buf *dma_buf_iter_begin(void);
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf);
637639
#endif /* __DMA_BUF_H__ */

kernel/bpf/Makefile

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,9 @@ obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
5353
obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
5454
obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
5555
obj-$(CONFIG_BPF_SYSCALL) += kmem_cache_iter.o
56+
# The dmabuf iterator needs the dma-buf core (dma_buf_iter_begin/next),
# so only build it when CONFIG_DMA_SHARED_BUFFER is built in (=y).
ifeq ($(CONFIG_DMA_SHARED_BUFFER),y)
obj-$(CONFIG_BPF_SYSCALL) += dmabuf_iter.o
endif
5659

5760
CFLAGS_REMOVE_percpu_freelist.o = $(CC_FLAGS_FTRACE)
5861
CFLAGS_REMOVE_bpf_lru_list.o = $(CC_FLAGS_FTRACE)

kernel/bpf/dmabuf_iter.c

Lines changed: 102 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,102 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/* Copyright (c) 2025 Google LLC */
3+
#include <linux/bpf.h>
4+
#include <linux/btf_ids.h>
5+
#include <linux/dma-buf.h>
6+
#include <linux/kernel.h>
7+
#include <linux/seq_file.h>
8+
9+
/*
 * seq_file start: only a single forward pass from the list head is
 * supported, so any nonzero position terminates the sequence instead
 * of replaying earlier buffers.
 */
static void *dmabuf_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? NULL : dma_buf_iter_begin();
}
16+
17+
/*
 * seq_file next: advance to the following live buffer. The reference on
 * the current buffer is handed off to dma_buf_iter_next(), which drops it.
 */
static void *dmabuf_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return dma_buf_iter_next(v);
}
25+
26+
struct bpf_iter__dmabuf {
27+
__bpf_md_ptr(struct bpf_iter_meta *, meta);
28+
__bpf_md_ptr(struct dma_buf *, dmabuf);
29+
};
30+
31+
/*
 * Run the attached bpf program (if any) for one buffer. @in_stop marks
 * the end-of-iteration invocation, where @v may be NULL.
 */
static int __dmabuf_seq_show(struct seq_file *seq, void *v, bool in_stop)
{
	struct bpf_iter_meta meta = { .seq = seq };
	struct bpf_iter__dmabuf ctx = { .meta = &meta, .dmabuf = v };
	struct bpf_prog *prog = bpf_iter_get_info(&meta, in_stop);

	if (!prog)
		return 0;

	return bpf_iter_run_prog(prog, &ctx);
}
47+
48+
static int dmabuf_iter_seq_show(struct seq_file *seq, void *v)
49+
{
50+
return __dmabuf_seq_show(seq, v, false);
51+
}
52+
53+
/*
 * seq_file stop: drop the reference still held on the buffer iteration
 * stopped at. @v is NULL when the list was exhausted, in which case no
 * reference is outstanding.
 */
static void dmabuf_iter_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		dma_buf_put(v);
}
60+
61+
static const struct seq_operations dmabuf_iter_seq_ops = {
62+
.start = dmabuf_iter_seq_start,
63+
.next = dmabuf_iter_seq_next,
64+
.stop = dmabuf_iter_seq_stop,
65+
.show = dmabuf_iter_seq_show,
66+
};
67+
68+
/* Identify this iterator type in /proc/<pid>/fdinfo for iterator links. */
static void bpf_iter_dmabuf_show_fdinfo(const struct bpf_iter_aux_info *aux,
					struct seq_file *seq)
{
	seq_puts(seq, "dmabuf iter\n");
}
73+
74+
static const struct bpf_iter_seq_info dmabuf_iter_seq_info = {
75+
.seq_ops = &dmabuf_iter_seq_ops,
76+
.init_seq_private = NULL,
77+
.fini_seq_private = NULL,
78+
.seq_priv_size = 0,
79+
};
80+
81+
static struct bpf_iter_reg bpf_dmabuf_reg_info = {
82+
.target = "dmabuf",
83+
.feature = BPF_ITER_RESCHED,
84+
.show_fdinfo = bpf_iter_dmabuf_show_fdinfo,
85+
.ctx_arg_info_size = 1,
86+
.ctx_arg_info = {
87+
{ offsetof(struct bpf_iter__dmabuf, dmabuf),
88+
PTR_TO_BTF_ID_OR_NULL },
89+
},
90+
.seq_info = &dmabuf_iter_seq_info,
91+
};
92+
93+
/* Declare the iterator's program signature and resolve dma_buf's BTF id. */
DEFINE_BPF_ITER_FUNC(dmabuf, struct bpf_iter_meta *meta, struct dma_buf *dmabuf)
BTF_ID_LIST_SINGLE(bpf_dmabuf_btf_id, struct, dma_buf)

/* Fill in the resolved BTF id, then register the "dmabuf" iterator target. */
static int __init dmabuf_iter_init(void)
{
	bpf_dmabuf_reg_info.ctx_arg_info[0].btf_id = bpf_dmabuf_btf_id[0];

	return bpf_iter_reg_target(&bpf_dmabuf_reg_info);
}

late_initcall(dmabuf_iter_init);

0 commit comments

Comments
 (0)