Skip to content

Commit 25c17c4

Browse files
Yang Shi authored and Catalin Marinas committed
hugetlb: arm64: add mte support
Enable MTE support for hugetlb. The MTE page flags will be set on the folio only. When copying hugetlb folio (for example, CoW), the tags for all subpages will be copied when copying the first subpage. When freeing hugetlb folio, the MTE flags will be cleared. Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: David Hildenbrand <david@redhat.com> Signed-off-by: Yang Shi <yang@os.amperecomputing.com> Link: https://lore.kernel.org/r/20241001225220.271178-1-yang@os.amperecomputing.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
1 parent 9852d85 commit 25c17c4

File tree

9 files changed

+159
-8
lines changed

9 files changed

+159
-8
lines changed

arch/arm64/include/asm/hugetlb.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#define __ASM_HUGETLB_H
1212

1313
#include <asm/cacheflush.h>
14+
#include <asm/mte.h>
1415
#include <asm/page.h>
1516

1617
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -21,6 +22,13 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
2122
/*
 * Reset the arch-specific folio flags when a hugetlb folio is freed.
 *
 * PG_dcache_clean is cleared unconditionally. When the CPU supports MTE,
 * the per-folio tag state is reset as well: PG_mte_tagged (tags valid)
 * and PG_mte_lock (the tag-initialisation lock bit). For hugetlb these
 * MTE flags live on the folio (head page) only, so clearing them here
 * guarantees a recycled folio starts out untagged.
 */
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
	clear_bit(PG_dcache_clean, &folio->flags);

#ifdef CONFIG_ARM64_MTE
	if (system_supports_mte()) {
		clear_bit(PG_mte_tagged, &folio->flags);
		clear_bit(PG_mte_lock, &folio->flags);
	}
#endif
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
2634

arch/arm64/include/asm/mman.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,8 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
3838
* backed by tags-capable memory. The vm_flags may be overridden by a
3939
* filesystem supporting MTE (RAM-based).
4040
*/
41-
if (system_supports_mte() && (flags & MAP_ANONYMOUS))
41+
if (system_supports_mte() &&
42+
(flags & (MAP_ANONYMOUS | MAP_HUGETLB)))
4243
return VM_MTE_ALLOWED;
4344

4445
return 0;

arch/arm64/include/asm/mte.h

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,8 @@ void mte_free_tag_storage(char *storage);
4141

4242
static inline void set_page_mte_tagged(struct page *page)
4343
{
44+
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
45+
4446
/*
4547
* Ensure that the tags written prior to this function are visible
4648
* before the page flags update.
@@ -53,6 +55,8 @@ static inline bool page_mte_tagged(struct page *page)
5355
{
5456
bool ret = test_bit(PG_mte_tagged, &page->flags);
5557

58+
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
59+
5660
/*
5761
* If the page is tagged, ensure ordering with a likely subsequent
5862
* read of the tags.
@@ -76,6 +80,8 @@ static inline bool page_mte_tagged(struct page *page)
7680
*/
7781
static inline bool try_page_mte_tagging(struct page *page)
7882
{
83+
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
84+
7985
if (!test_and_set_bit(PG_mte_lock, &page->flags))
8086
return true;
8187

@@ -157,6 +163,67 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
157163

158164
#endif /* CONFIG_ARM64_MTE */
159165

166+
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_ARM64_MTE)
/*
 * Hugetlb folios keep their MTE tag state on the folio (head page) only.
 * These helpers mirror the per-page set_page_mte_tagged() /
 * page_mte_tagged() / try_page_mte_tagging() API at folio granularity.
 */

/* Mark the folio's tags as initialised; pairs with the smp_rmb() below. */
static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
{
	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

	/*
	 * Ensure that the tags written prior to this function are visible
	 * before the folio flags update.
	 */
	smp_wmb();
	set_bit(PG_mte_tagged, &folio->flags);
}

/* Return true if the folio's tags have been initialised. */
static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
	bool ret = test_bit(PG_mte_tagged, &folio->flags);

	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

	/*
	 * If the folio is tagged, ensure ordering with a likely subsequent
	 * read of the tags.
	 */
	if (ret)
		smp_rmb();
	return ret;
}

/*
 * Try to take ownership of tag initialisation for the folio.
 * Returns true if the caller won the PG_mte_lock race and must now
 * initialise the tags (then call folio_set_hugetlb_mte_tagged());
 * returns false once another initialiser has finished.
 */
static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

	if (!test_and_set_bit(PG_mte_lock, &folio->flags))
		return true;

	/*
	 * The tags are either being initialised or may have been initialised
	 * already. Check if the PG_mte_tagged flag has been set or wait
	 * otherwise.
	 */
	smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));

	return false;
}
#else
/* !CONFIG_HUGETLB_PAGE || !CONFIG_ARM64_MTE: no-op stubs. */
static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
{
}

static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
	return false;
}

static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
	return false;
}
#endif
226+
160227
static inline void mte_disable_tco_entry(struct task_struct *task)
161228
{
162229
if (!system_supports_mte())

arch/arm64/kernel/hibernate.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -266,9 +266,15 @@ static int swsusp_mte_save_tags(void)
266266
max_zone_pfn = zone_end_pfn(zone);
267267
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
268268
struct page *page = pfn_to_online_page(pfn);
269+
struct folio *folio;
269270

270271
if (!page)
271272
continue;
273+
folio = page_folio(page);
274+
275+
if (folio_test_hugetlb(folio) &&
276+
!folio_test_hugetlb_mte_tagged(folio))
277+
continue;
272278

273279
if (!page_mte_tagged(page))
274280
continue;

arch/arm64/kernel/mte.c

Lines changed: 25 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,24 @@ EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
3838
void mte_sync_tags(pte_t pte, unsigned int nr_pages)
3939
{
4040
struct page *page = pte_page(pte);
41-
unsigned int i;
41+
struct folio *folio = page_folio(page);
42+
unsigned long i;
43+
44+
if (folio_test_hugetlb(folio)) {
45+
unsigned long nr = folio_nr_pages(folio);
46+
47+
/* Hugetlb MTE flags are set for head page only */
48+
if (folio_try_hugetlb_mte_tagging(folio)) {
49+
for (i = 0; i < nr; i++, page++)
50+
mte_clear_page_tags(page_address(page));
51+
folio_set_hugetlb_mte_tagged(folio);
52+
}
53+
54+
/* ensure the tags are visible before the PTE is set */
55+
smp_wmb();
56+
57+
return;
58+
}
4259

4360
/* if PG_mte_tagged is set, tags have already been initialised */
4461
for (i = 0; i < nr_pages; i++, page++) {
@@ -410,6 +427,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
410427
void *maddr;
411428
struct page *page = get_user_page_vma_remote(mm, addr,
412429
gup_flags, &vma);
430+
struct folio *folio;
413431

414432
if (IS_ERR(page)) {
415433
err = PTR_ERR(page);
@@ -428,7 +446,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
428446
put_page(page);
429447
break;
430448
}
431-
WARN_ON_ONCE(!page_mte_tagged(page));
449+
450+
folio = page_folio(page);
451+
if (folio_test_hugetlb(folio))
452+
WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
453+
else
454+
WARN_ON_ONCE(!page_mte_tagged(page));
432455

433456
/* limit access to the end of the page */
434457
offset = offset_in_page(addr);

arch/arm64/kvm/guest.c

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1055,6 +1055,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
10551055
void *maddr;
10561056
unsigned long num_tags;
10571057
struct page *page;
1058+
struct folio *folio;
10581059

10591060
if (is_error_noslot_pfn(pfn)) {
10601061
ret = -EFAULT;
@@ -1068,10 +1069,13 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
10681069
ret = -EFAULT;
10691070
goto out;
10701071
}
1072+
folio = page_folio(page);
10711073
maddr = page_address(page);
10721074

10731075
if (!write) {
1074-
if (page_mte_tagged(page))
1076+
if ((folio_test_hugetlb(folio) &&
1077+
folio_test_hugetlb_mte_tagged(folio)) ||
1078+
page_mte_tagged(page))
10751079
num_tags = mte_copy_tags_to_user(tags, maddr,
10761080
MTE_GRANULES_PER_PAGE);
10771081
else
@@ -1085,14 +1089,20 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
10851089
* __set_ptes() in the VMM but still overriding the
10861090
* tags, hence ignoring the return value.
10871091
*/
1088-
try_page_mte_tagging(page);
1092+
if (folio_test_hugetlb(folio))
1093+
folio_try_hugetlb_mte_tagging(folio);
1094+
else
1095+
try_page_mte_tagging(page);
10891096
num_tags = mte_copy_tags_from_user(maddr, tags,
10901097
MTE_GRANULES_PER_PAGE);
10911098

10921099
/* uaccess failed, don't leave stale tags */
10931100
if (num_tags != MTE_GRANULES_PER_PAGE)
10941101
mte_clear_page_tags(maddr);
1095-
set_page_mte_tagged(page);
1102+
if (folio_test_hugetlb(folio))
1103+
folio_set_hugetlb_mte_tagged(folio);
1104+
else
1105+
set_page_mte_tagged(page);
10961106

10971107
kvm_release_pfn_dirty(pfn);
10981108
}

arch/arm64/kvm/mmu.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1401,10 +1401,21 @@ static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
14011401
{
14021402
unsigned long i, nr_pages = size >> PAGE_SHIFT;
14031403
struct page *page = pfn_to_page(pfn);
1404+
struct folio *folio = page_folio(page);
14041405

14051406
if (!kvm_has_mte(kvm))
14061407
return;
14071408

1409+
if (folio_test_hugetlb(folio)) {
1410+
/* Hugetlb has MTE flags set on head page only */
1411+
if (folio_try_hugetlb_mte_tagging(folio)) {
1412+
for (i = 0; i < nr_pages; i++, page++)
1413+
mte_clear_page_tags(page_address(page));
1414+
folio_set_hugetlb_mte_tagged(folio);
1415+
}
1416+
return;
1417+
}
1418+
14081419
for (i = 0; i < nr_pages; i++, page++) {
14091420
if (try_page_mte_tagging(page)) {
14101421
mte_clear_page_tags(page_address(page));

arch/arm64/mm/copypage.c

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,15 +18,40 @@ void copy_highpage(struct page *to, struct page *from)
1818
{
1919
void *kto = page_address(to);
2020
void *kfrom = page_address(from);
21+
struct folio *src = page_folio(from);
22+
struct folio *dst = page_folio(to);
23+
unsigned int i, nr_pages;
2124

2225
copy_page(kto, kfrom);
2326

2427
if (kasan_hw_tags_enabled())
2528
page_kasan_tag_reset(to);
2629

27-
if (system_supports_mte() && page_mte_tagged(from)) {
30+
if (!system_supports_mte())
31+
return;
32+
33+
if (folio_test_hugetlb(src) &&
34+
folio_test_hugetlb_mte_tagged(src)) {
35+
if (!folio_try_hugetlb_mte_tagging(dst))
36+
return;
37+
38+
/*
39+
* Populate tags for all subpages.
40+
*
41+
* Don't assume the first page is head page since
42+
* huge page copy may start from any subpage.
43+
*/
44+
nr_pages = folio_nr_pages(src);
45+
for (i = 0; i < nr_pages; i++) {
46+
kfrom = page_address(folio_page(src, i));
47+
kto = page_address(folio_page(dst, i));
48+
mte_copy_page_tags(kto, kfrom);
49+
}
50+
folio_set_hugetlb_mte_tagged(dst);
51+
} else if (page_mte_tagged(from)) {
2852
/* It's a new page, shouldn't have been tagged yet */
2953
WARN_ON_ONCE(!try_page_mte_tagging(to));
54+
3055
mte_copy_page_tags(kto, kfrom);
3156
set_page_mte_tagged(to);
3257
}

fs/hugetlbfs/inode.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
110110
* way when do_mmap unwinds (may be important on powerpc
111111
* and ia64).
112112
*/
113-
vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
113+
vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND | VM_MTE_ALLOWED);
114114
vma->vm_ops = &hugetlb_vm_ops;
115115

116116
ret = seal_check_write(info->seals, vma);

0 commit comments

Comments
 (0)