Skip to content

Commit 807174a

Browse files
kirylakpm00
authored and committed
mm: fix endless reclaim on machines with unaccepted memory
Unaccepted memory is considered unusable free memory, which is not counted as free on the zone watermark check. This causes get_page_from_freelist() to accept more memory to hit the high watermark, but it creates problems in the reclaim path. The reclaim path encounters a failed zone watermark check and attempts to reclaim memory. This is usually successful, but if there is little or no reclaimable memory, it can result in endless reclaim with little to no progress. This can occur early in the boot process, just after start of the init process when the only reclaimable memory is the page cache of the init executable and its libraries. Make unaccepted memory free from watermark check point of view. This way unaccepted memory will never be the trigger of memory reclaim. Accept more memory in the get_page_from_freelist() if needed. Link: https://lkml.kernel.org/r/20240809114854.3745464-2-kirill.shutemov@linux.intel.com Fixes: dcdfdd4 ("mm: Add support for unaccepted memory") Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Reported-by: Jianxiong Gao <jxgao@google.com> Acked-by: David Hildenbrand <david@redhat.com> Tested-by: Jianxiong Gao <jxgao@google.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Mike Rapoport (Microsoft) <rppt@kernel.org> Cc: Tom Lendacky <thomas.lendacky@amd.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: <stable@vger.kernel.org> [6.5+] Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent af3b7d0 commit 807174a

File tree

1 file changed

+20
-22
lines changed

1 file changed

+20
-22
lines changed

mm/page_alloc.c

Lines changed: 20 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -287,7 +287,7 @@ EXPORT_SYMBOL(nr_online_nodes);
287287

288288
static bool page_contains_unaccepted(struct page *page, unsigned int order);
289289
static void accept_page(struct page *page, unsigned int order);
290-
static bool try_to_accept_memory(struct zone *zone, unsigned int order);
290+
static bool cond_accept_memory(struct zone *zone, unsigned int order);
291291
static inline bool has_unaccepted_memory(void);
292292
static bool __free_unaccepted(struct page *page);
293293

@@ -3072,9 +3072,6 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
30723072
if (!(alloc_flags & ALLOC_CMA))
30733073
unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
30743074
#endif
3075-
#ifdef CONFIG_UNACCEPTED_MEMORY
3076-
unusable_free += zone_page_state(z, NR_UNACCEPTED);
3077-
#endif
30783075

30793076
return unusable_free;
30803077
}
@@ -3368,6 +3365,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
33683365
}
33693366
}
33703367

3368+
cond_accept_memory(zone, order);
3369+
33713370
/*
33723371
* Detect whether the number of free pages is below high
33733372
* watermark. If so, we will decrease pcp->high and free
@@ -3393,10 +3392,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
33933392
gfp_mask)) {
33943393
int ret;
33953394

3396-
if (has_unaccepted_memory()) {
3397-
if (try_to_accept_memory(zone, order))
3398-
goto try_this_zone;
3399-
}
3395+
if (cond_accept_memory(zone, order))
3396+
goto try_this_zone;
34003397

34013398
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
34023399
/*
@@ -3450,10 +3447,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
34503447

34513448
return page;
34523449
} else {
3453-
if (has_unaccepted_memory()) {
3454-
if (try_to_accept_memory(zone, order))
3455-
goto try_this_zone;
3456-
}
3450+
if (cond_accept_memory(zone, order))
3451+
goto try_this_zone;
34573452

34583453
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
34593454
/* Try again if zone has deferred pages */
@@ -6950,9 +6945,6 @@ static bool try_to_accept_memory_one(struct zone *zone)
69506945
struct page *page;
69516946
bool last;
69526947

6953-
if (list_empty(&zone->unaccepted_pages))
6954-
return false;
6955-
69566948
spin_lock_irqsave(&zone->lock, flags);
69576949
page = list_first_entry_or_null(&zone->unaccepted_pages,
69586950
struct page, lru);
@@ -6978,23 +6970,29 @@ static bool try_to_accept_memory_one(struct zone *zone)
69786970
return true;
69796971
}
69806972

6981-
static bool try_to_accept_memory(struct zone *zone, unsigned int order)
6973+
static bool cond_accept_memory(struct zone *zone, unsigned int order)
69826974
{
69836975
long to_accept;
6984-
int ret = false;
6976+
bool ret = false;
6977+
6978+
if (!has_unaccepted_memory())
6979+
return false;
6980+
6981+
if (list_empty(&zone->unaccepted_pages))
6982+
return false;
69856983

69866984
/* How much to accept to get to high watermark? */
69876985
to_accept = high_wmark_pages(zone) -
69886986
(zone_page_state(zone, NR_FREE_PAGES) -
6989-
__zone_watermark_unusable_free(zone, order, 0));
6987+
__zone_watermark_unusable_free(zone, order, 0) -
6988+
zone_page_state(zone, NR_UNACCEPTED));
69906989

6991-
/* Accept at least one page */
6992-
do {
6990+
while (to_accept > 0) {
69936991
if (!try_to_accept_memory_one(zone))
69946992
break;
69956993
ret = true;
69966994
to_accept -= MAX_ORDER_NR_PAGES;
6997-
} while (to_accept > 0);
6995+
}
69986996

69996997
return ret;
70006998
}
@@ -7037,7 +7035,7 @@ static void accept_page(struct page *page, unsigned int order)
70377035
{
70387036
}
70397037

7040-
static bool try_to_accept_memory(struct zone *zone, unsigned int order)
7038+
static bool cond_accept_memory(struct zone *zone, unsigned int order)
70417039
{
70427040
return false;
70437041
}

0 commit comments

Comments
 (0)