Skip to content

Commit 29f671b

Browse files
sidkumar99 authored and Sasha Levin committed
mm/hugetlb: convert free_huge_page to folios
[ Upstream commit 0356c4b ]

Use folios inside free_huge_page(); this is in preparation for converting
hugetlb_cgroup_uncharge_page() to take in a folio.

Link: https://lkml.kernel.org/r/20221101223059.460937-7-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: b76b469 ("mm/hugetlb: fix missing hugetlb_lock for resv uncharge")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent f699130 commit 29f671b

File tree

1 file changed

+14
-13
lines changed

1 file changed

+14
-13
lines changed

mm/hugetlb.c

Lines changed: 14 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -1918,21 +1918,22 @@ void free_huge_page(struct page *page)
19181918
* Can't pass hstate in here because it is called from the
19191919
* compound page destructor.
19201920
*/
1921-
struct hstate *h = page_hstate(page);
1922-
int nid = page_to_nid(page);
1923-
struct hugepage_subpool *spool = hugetlb_page_subpool(page);
1921+
struct folio *folio = page_folio(page);
1922+
struct hstate *h = folio_hstate(folio);
1923+
int nid = folio_nid(folio);
1924+
struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
19241925
bool restore_reserve;
19251926
unsigned long flags;
19261927

1927-
VM_BUG_ON_PAGE(page_count(page), page);
1928-
VM_BUG_ON_PAGE(page_mapcount(page), page);
1928+
VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1929+
VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
19291930

1930-
hugetlb_set_page_subpool(page, NULL);
1931-
if (PageAnon(page))
1932-
__ClearPageAnonExclusive(page);
1933-
page->mapping = NULL;
1934-
restore_reserve = HPageRestoreReserve(page);
1935-
ClearHPageRestoreReserve(page);
1931+
hugetlb_set_folio_subpool(folio, NULL);
1932+
if (folio_test_anon(folio))
1933+
__ClearPageAnonExclusive(&folio->page);
1934+
folio->mapping = NULL;
1935+
restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1936+
folio_clear_hugetlb_restore_reserve(folio);
19361937

19371938
/*
19381939
* If HPageRestoreReserve was set on page, page allocation consumed a
@@ -1954,15 +1955,15 @@ void free_huge_page(struct page *page)
19541955
}
19551956

19561957
spin_lock_irqsave(&hugetlb_lock, flags);
1957-
ClearHPageMigratable(page);
1958+
folio_clear_hugetlb_migratable(folio);
19581959
hugetlb_cgroup_uncharge_page(hstate_index(h),
19591960
pages_per_huge_page(h), page);
19601961
hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
19611962
pages_per_huge_page(h), page);
19621963
if (restore_reserve)
19631964
h->resv_huge_pages++;
19641965

1965-
if (HPageTemporary(page)) {
1966+
if (folio_test_hugetlb_temporary(folio)) {
19661967
remove_hugetlb_page(h, page, false);
19671968
spin_unlock_irqrestore(&hugetlb_lock, flags);
19681969
update_and_free_page(h, page, true);

0 commit comments

Comments (0)