
Commit 07e09c4

davidhildenbrand authored and akpm00 committed
mm/huge_memory: work on folio->swap instead of page->private when splitting folio
Let's work on folio->swap instead. While at it, use folio_test_anon() and folio_test_swapcache() -- the original folio remains valid even after splitting (but is then an order-0 folio). We can probably convert a lot more to folios in that code, but let's focus on folio->swap handling only for now.

Link: https://lkml.kernel.org/r/20230821160849.531668-5-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Chris Li <chrisl@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
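In short, the split code stops reconstructing a swp_entry_t from page->private on every use and instead reads the folio's swap entry directly. A minimal before/after sketch of the idiom, as it appears in the hunks below:

	/* Before: rebuild the swap entry from page->private on each use. */
	swp_entry_t entry = { .val = page_private(head) };
	offset = swp_offset(entry);
	swap_cache = swap_address_space(entry);

	/* After: the folio carries its swap entry in folio->swap. */
	offset = swp_offset(folio->swap);
	swap_cache = swap_address_space(folio->swap);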
1 parent: 3d2c908

File tree: 1 file changed (+15, -14 lines)

mm/huge_memory.c

Lines changed: 15 additions & 14 deletions
@@ -2401,10 +2401,16 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
 	}
 }
 
-static void __split_huge_page_tail(struct page *head, int tail,
+static void __split_huge_page_tail(struct folio *folio, int tail,
 		struct lruvec *lruvec, struct list_head *list)
 {
+	struct page *head = &folio->page;
 	struct page *page_tail = head + tail;
+	/*
+	 * Careful: new_folio is not a "real" folio before we cleared PageTail.
+	 * Don't pass it around before clear_compound_head().
+	 */
+	struct folio *new_folio = (struct folio *)page_tail;
 
 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
 
@@ -2453,8 +2459,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
 		VM_WARN_ON_ONCE_PAGE(true, page_tail);
 		page_tail->private = 0;
 	}
-	if (PageSwapCache(head))
-		set_page_private(page_tail, (unsigned long)head->private + tail);
+	if (folio_test_swapcache(folio))
+		new_folio->swap.val = folio->swap.val + tail;
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
@@ -2500,11 +2506,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	/* complete memcg works before add pages to LRU */
 	split_page_memcg(head, nr);
 
-	if (PageAnon(head) && PageSwapCache(head)) {
-		swp_entry_t entry = { .val = page_private(head) };
-
-		offset = swp_offset(entry);
-		swap_cache = swap_address_space(entry);
+	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
+		offset = swp_offset(folio->swap);
+		swap_cache = swap_address_space(folio->swap);
 		xa_lock(&swap_cache->i_pages);
 	}
 
@@ -2514,7 +2518,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	ClearPageHasHWPoisoned(head);
 
 	for (i = nr - 1; i >= 1; i--) {
-		__split_huge_page_tail(head, i, lruvec, list);
+		__split_huge_page_tail(folio, i, lruvec, list);
 		/* Some pages can be beyond EOF: drop them from page cache */
 		if (head[i].index >= end) {
 			struct folio *tail = page_folio(head + i);
@@ -2559,11 +2563,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
 	remap_page(folio, nr);
 
-	if (PageSwapCache(head)) {
-		swp_entry_t entry = { .val = page_private(head) };
-
-		split_swap_cluster(entry);
-	}
+	if (folio_test_swapcache(folio))
+		split_swap_cluster(folio->swap);
 
 	for (i = 0; i < nr; i++) {
 		struct page *subpage = head + i;
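One detail worth noting from the second hunk: a large folio in the swap cache occupies consecutive swap slots, so a tail page's entry is the head's entry plus its index, which is what new_folio->swap.val = folio->swap.val + tail relies on. A standalone sketch of that arithmetic, using a simplified swp_entry_t stand-in rather than the kernel's definitions:

	#include <stdio.h>

	/* Simplified stand-in for the kernel's swp_entry_t; illustrative only. */
	typedef struct { unsigned long val; } swp_entry_t;

	int main(void)
	{
		swp_entry_t folio_swap = { .val = 0x1000 };	/* head page's entry */
		int nr = 4;					/* e.g. an order-2 folio */

		/* Mirrors new_folio->swap.val = folio->swap.val + tail: each
		 * tail subpage maps to the next consecutive swap slot. */
		for (int tail = 1; tail < nr; tail++)
			printf("tail %d -> swap entry 0x%lx\n",
			       tail, folio_swap.val + tail);
		return 0;
	}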
