Skip to content

Commit c9eb810

Browse files
Author: Alexei Starovoitov (committed)
bpf: Use try_alloc_pages() to allocate pages for bpf needs.
Use try_alloc_pages() and free_pages_nolock() for BPF needs
when context doesn't allow using normal alloc_pages.
This is a prerequisite for further work.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20250222024427.30294-7-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent e8d78db commit c9eb810

File tree

3 files changed: +23 −7 lines

include/linux/bpf.h

Lines changed: 1 addition & 1 deletion
@@ -2348,7 +2348,7 @@ int generic_map_delete_batch(struct bpf_map *map,
 struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
 
-int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
 			unsigned long nr_pages, struct page **page_array);
 #ifdef CONFIG_MEMCG
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,

kernel/bpf/arena.c

Lines changed: 2 additions & 3 deletions
@@ -287,7 +287,7 @@ static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
 		return VM_FAULT_SIGSEGV;
 
 	/* Account into memcg of the process that created bpf_arena */
-	ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page);
+	ret = bpf_map_alloc_pages(map, NUMA_NO_NODE, 1, &page);
 	if (ret) {
 		range_tree_set(&arena->rt, vmf->pgoff, 1);
 		return VM_FAULT_SIGSEGV;
@@ -465,8 +465,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
 	if (ret)
 		goto out_free_pages;
 
-	ret = bpf_map_alloc_pages(&arena->map, GFP_KERNEL | __GFP_ZERO,
-				  node_id, page_cnt, pages);
+	ret = bpf_map_alloc_pages(&arena->map, node_id, page_cnt, pages);
 	if (ret)
 		goto out;
 

kernel/bpf/syscall.c

Lines changed: 20 additions & 3 deletions
@@ -569,7 +569,24 @@ static void bpf_map_release_memcg(struct bpf_map *map)
 }
 #endif
 
-int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+static bool can_alloc_pages(void)
+{
+	return preempt_count() == 0 && !irqs_disabled() &&
+	       !IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+static struct page *__bpf_alloc_page(int nid)
+{
+	if (!can_alloc_pages())
+		return try_alloc_pages(nid, 0);
+
+	return alloc_pages_node(nid,
+				GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT
+				| __GFP_NOWARN,
+				0);
+}
+
+int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
 			unsigned long nr_pages, struct page **pages)
 {
 	unsigned long i, j;
@@ -582,14 +599,14 @@ int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
 	old_memcg = set_active_memcg(memcg);
 #endif
 	for (i = 0; i < nr_pages; i++) {
-		pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);
+		pg = __bpf_alloc_page(nid);
 
 		if (pg) {
 			pages[i] = pg;
 			continue;
 		}
 		for (j = 0; j < i; j++)
-			__free_page(pages[j]);
+			free_pages_nolock(pages[j], 0);
 		ret = -ENOMEM;
 		break;
 	}

0 commit comments

Comments (0)