Skip to content

Commit 23fa022

Browse files
kirylakpm00
authored and committed
mm/page_alloc: ensure try_alloc_pages() plays well with unaccepted memory
try_alloc_pages() will not attempt to allocate memory if the system has *any* unaccepted memory. Memory is accepted as needed and can remain in the system indefinitely, causing the interface to always fail. Rather than immediately giving up, attempt to use already accepted memory on free lists. Pass 'alloc_flags' to cond_accept_memory() and do not accept new memory for ALLOC_TRYLOCK requests. Found via code inspection - only BPF uses this at present and the runtime effects are unclear. Link: https://lkml.kernel.org/r/20250506112509.905147-2-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Fixes: 97769a5 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation") Cc: Alexei Starovoitov <ast@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Brendan Jackman <jackmanb@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent d55582d commit 23fa022

File tree

1 file changed

+15
-13
lines changed

1 file changed

+15
-13
lines changed

mm/page_alloc.c

Lines changed: 15 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -290,7 +290,8 @@ EXPORT_SYMBOL(nr_online_nodes);
290290
#endif
291291

292292
static bool page_contains_unaccepted(struct page *page, unsigned int order);
293-
static bool cond_accept_memory(struct zone *zone, unsigned int order);
293+
static bool cond_accept_memory(struct zone *zone, unsigned int order,
294+
int alloc_flags);
294295
static bool __free_unaccepted(struct page *page);
295296

296297
int page_group_by_mobility_disabled __read_mostly;
@@ -3611,7 +3612,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
36113612
}
36123613
}
36133614

3614-
cond_accept_memory(zone, order);
3615+
cond_accept_memory(zone, order, alloc_flags);
36153616

36163617
/*
36173618
* Detect whether the number of free pages is below high
@@ -3638,7 +3639,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
36383639
gfp_mask)) {
36393640
int ret;
36403641

3641-
if (cond_accept_memory(zone, order))
3642+
if (cond_accept_memory(zone, order, alloc_flags))
36423643
goto try_this_zone;
36433644

36443645
/*
@@ -3691,7 +3692,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
36913692

36923693
return page;
36933694
} else {
3694-
if (cond_accept_memory(zone, order))
3695+
if (cond_accept_memory(zone, order, alloc_flags))
36953696
goto try_this_zone;
36963697

36973698
/* Try again if zone has deferred pages */
@@ -4844,7 +4845,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
48444845
goto failed;
48454846
}
48464847

4847-
cond_accept_memory(zone, 0);
4848+
cond_accept_memory(zone, 0, alloc_flags);
48484849
retry_this_zone:
48494850
mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
48504851
if (zone_watermark_fast(zone, 0, mark,
@@ -4853,7 +4854,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
48534854
break;
48544855
}
48554856

4856-
if (cond_accept_memory(zone, 0))
4857+
if (cond_accept_memory(zone, 0, alloc_flags))
48574858
goto retry_this_zone;
48584859

48594860
/* Try again if zone has deferred pages */
@@ -7281,7 +7282,8 @@ static inline bool has_unaccepted_memory(void)
72817282
return static_branch_unlikely(&zones_with_unaccepted_pages);
72827283
}
72837284

7284-
static bool cond_accept_memory(struct zone *zone, unsigned int order)
7285+
static bool cond_accept_memory(struct zone *zone, unsigned int order,
7286+
int alloc_flags)
72857287
{
72867288
long to_accept, wmark;
72877289
bool ret = false;
@@ -7292,6 +7294,10 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order)
72927294
if (list_empty(&zone->unaccepted_pages))
72937295
return false;
72947296

7297+
/* Bailout, since try_to_accept_memory_one() needs to take a lock */
7298+
if (alloc_flags & ALLOC_TRYLOCK)
7299+
return false;
7300+
72957301
wmark = promo_wmark_pages(zone);
72967302

72977303
/*
@@ -7348,7 +7354,8 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
73487354
return false;
73497355
}
73507356

7351-
static bool cond_accept_memory(struct zone *zone, unsigned int order)
7357+
static bool cond_accept_memory(struct zone *zone, unsigned int order,
7358+
int alloc_flags)
73527359
{
73537360
return false;
73547361
}
@@ -7419,11 +7426,6 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order)
74197426
if (!pcp_allowed_order(order))
74207427
return NULL;
74217428

7422-
#ifdef CONFIG_UNACCEPTED_MEMORY
7423-
/* Bailout, since try_to_accept_memory_one() needs to take a lock */
7424-
if (has_unaccepted_memory())
7425-
return NULL;
7426-
#endif
74277429
/* Bailout, since _deferred_grow_zone() needs to take a lock */
74287430
if (deferred_pages_enabled())
74297431
return NULL;

0 commit comments

Comments
 (0)