Skip to content

Commit 168b849

Browse files
Merge patch series "svnapot fixes"
Alexandre Ghiti <alexghiti@rivosinc.com> says:

While merging riscv napot and arm64 contpte support, I noticed we did not abide by the specification, which states that we should clear a napot mapping before setting a new one — called "break before make" on arm64 (patch 1) — and also that we did not add the new hugetlb page size introduced by napot to hugetlb_mask_last_page() (patch 2).

* b4-shazam-merge:
  riscv: Fix hugetlb_mask_last_page() when NAPOT is enabled
  riscv: Fix set_huge_pte_at() for NAPOT mapping

Link: https://lore.kernel.org/r/20240117195741.1926459-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2 parents d9807d6 + a179a4b commit 168b849

File tree

1 file changed

+60
-2
lines changed

1 file changed

+60
-2
lines changed

arch/riscv/mm/hugetlbpage.c

Lines changed: 60 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
125125
return pte;
126126
}
127127

128+
unsigned long hugetlb_mask_last_page(struct hstate *h)
129+
{
130+
unsigned long hp_size = huge_page_size(h);
131+
132+
switch (hp_size) {
133+
#ifndef __PAGETABLE_PMD_FOLDED
134+
case PUD_SIZE:
135+
return P4D_SIZE - PUD_SIZE;
136+
#endif
137+
case PMD_SIZE:
138+
return PUD_SIZE - PMD_SIZE;
139+
case napot_cont_size(NAPOT_CONT64KB_ORDER):
140+
return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
141+
default:
142+
break;
143+
}
144+
145+
return 0UL;
146+
}
147+
128148
static pte_t get_clear_contig(struct mm_struct *mm,
129149
unsigned long addr,
130150
pte_t *ptep,
@@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
177197
return entry;
178198
}
179199

200+
static void clear_flush(struct mm_struct *mm,
201+
unsigned long addr,
202+
pte_t *ptep,
203+
unsigned long pgsize,
204+
unsigned long ncontig)
205+
{
206+
struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
207+
unsigned long i, saddr = addr;
208+
209+
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
210+
ptep_get_and_clear(mm, addr, ptep);
211+
212+
flush_tlb_range(&vma, saddr, addr);
213+
}
214+
215+
/*
 * When dealing with NAPOT mappings, the privileged specification indicates that
 * "if an update needs to be made, the OS generally should first mark all of the
 * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
 * within the range, [...] then update the PTE(s), as described in Section
 * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
 * arm64.
 */
180223
void set_huge_pte_at(struct mm_struct *mm,
181224
unsigned long addr,
182225
pte_t *ptep,
183226
pte_t pte,
184227
unsigned long sz)
185228
{
186-
unsigned long hugepage_shift;
229+
unsigned long hugepage_shift, pgsize;
187230
int i, pte_num;
188231

189232
if (sz >= PGDIR_SIZE)
@@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
198241
hugepage_shift = PAGE_SHIFT;
199242

200243
pte_num = sz >> hugepage_shift;
201-
for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
244+
pgsize = 1 << hugepage_shift;
245+
246+
if (!pte_present(pte)) {
247+
for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
248+
set_ptes(mm, addr, ptep, pte, 1);
249+
return;
250+
}
251+
252+
if (!pte_napot(pte)) {
253+
set_ptes(mm, addr, ptep, pte, 1);
254+
return;
255+
}
256+
257+
clear_flush(mm, addr, ptep, pgsize, pte_num);
258+
259+
for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
202260
set_pte_at(mm, addr, ptep, pte);
203261
}
204262

0 commit comments

Comments
 (0)