
Commit 5362a4b

npiggin authored and mpe committed
powerpc: Fix reverse map real-mode address lookup with huge vmalloc
real_vmalloc_addr() does not currently work for huge vmalloc, which is what
the reverse map can be allocated with for radix host, hash guest.

Extract the hugepage aware equivalent from eeh code into a helper, and
convert existing sites including this one to use it.

Fixes: 8abddd9 ("powerpc/64s/radix: Enable huge vmalloc mappings")
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210526120005.3432222-1-npiggin@gmail.com
1 parent 82123a3 commit 5362a4b
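To make the failure mode concrete: with a huge-page-backed vmalloc area, the old real_vmalloc_addr() arithmetic keeps only the low PAGE_SHIFT bits of the virtual address, so any offset past the first base page inside the huge mapping is dropped and the lookup lands on the wrong physical page. The standalone sketch below illustrates only the offset arithmetic; it is ordinary userspace C, not part of the patch, and PAGE_SHIFT = 16 (64K base pages) plus all numeric values are assumptions made for the example.

/*
 * Illustrative only: compares the old page-sized offset mask with the
 * hugepage-aware mask used by ppc_find_vmap_phys().
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	16	/* assumption: 64K base pages */

int main(void)
{
	uint64_t huge_shift = 21;		/* 2MB huge vmap mapping */
	uint64_t pa_base    = 0x40000000ull;	/* phys addr the huge PTE maps */
	uint64_t vaddr_off  = 0x123456ull;	/* object's offset inside the mapping */

	/* Old math: keep only the low PAGE_SHIFT bits of the address. */
	uint64_t old_pa = pa_base | (vaddr_off & ((1ull << PAGE_SHIFT) - 1));

	/* New math: keep the full offset within the (huge) page. */
	uint64_t new_pa = pa_base | (vaddr_off & ((1ull << huge_shift) - 1));

	printf("old: 0x%llx  new: 0x%llx  lost: 0x%llx\n",
	       (unsigned long long)old_pa, (unsigned long long)new_pa,
	       (unsigned long long)(new_pa - old_pa));
	return 0;
}

With these example values the old formula lands 0x120000 bytes short of the correct physical address, which is the class of mistranslation the huge-vmalloc'd reverse map could hit.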

4 files changed (+35, -48 lines)


arch/powerpc/include/asm/pte-walk.h

Lines changed: 29 additions & 0 deletions
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
 	pgd_t *pgdir = init_mm.pgd;
 	return __find_linux_pte(pgdir, ea, NULL, hshift);
 }
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+	pte_t *ptep;
+	phys_addr_t pa;
+	int hugepage_shift;
+
+	/*
+	 * init_mm does not free page tables, and does not do THP. It may
+	 * have huge pages from huge vmalloc / ioremap etc.
+	 */
+	ptep = find_init_mm_pte(addr, &hugepage_shift);
+	if (WARN_ON(!ptep))
+		return 0;
+
+	pa = PFN_PHYS(pte_pfn(*ptep));
+
+	if (!hugepage_shift)
+		hugepage_shift = PAGE_SHIFT;
+
+	pa |= addr & ((1ul << hugepage_shift) - 1);
+
+	return pa;
+}
+
 /*
  * This is what we should always use. Any other lockless page table lookup needs
  * careful audit against THP split.
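For orientation before the converted callers below, here is a minimal usage sketch of the new helper; the wrapper name demo_vmap_real_addr is hypothetical and not part of the patch. It mirrors what real_vmalloc_addr() in book3s_hv_rm_mmu.c becomes: translate a vmalloc/ioremap pointer to its linear-map alias via the physical address, working whether the mapping is backed by base pages or huge pages.

#include <asm/page.h>		/* __va() */
#include <asm/pte-walk.h>	/* ppc_find_vmap_phys() */

/* Hypothetical wrapper, shown only to illustrate use of the helper. */
static void *demo_vmap_real_addr(void *vmap_ptr)
{
	phys_addr_t pa;

	/* Lockless, hugepage-aware lookup; usable in real mode. */
	pa = ppc_find_vmap_phys((unsigned long)vmap_ptr);

	/* Hand back the linear-map alias of that physical address. */
	return __va(pa);
}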

arch/powerpc/kernel/eeh.c

Lines changed: 1 addition & 22 deletions
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
  */
 static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
-	pte_t *ptep;
-	unsigned long pa;
-	int hugepage_shift;
-
-	/*
-	 * We won't find hugepages here(this is iomem). Hence we are not
-	 * worried about _PAGE_SPLITTING/collapse. Also we will not hit
-	 * page table free, because of init_mm.
-	 */
-	ptep = find_init_mm_pte(token, &hugepage_shift);
-	if (!ptep)
-		return token;
-
-	pa = pte_pfn(*ptep);
-
-	/* On radix we can do hugepage mappings for io, so handle that */
-	if (!hugepage_shift)
-		hugepage_shift = PAGE_SHIFT;
-
-	pa <<= PAGE_SHIFT;
-	pa |= token & ((1ul << hugepage_shift) - 1);
-	return pa;
+	return ppc_find_vmap_phys(token);
 }
 
 /*

arch/powerpc/kernel/io-workarounds.c

Lines changed: 3 additions & 13 deletions
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 #ifdef CONFIG_PPC_INDIRECT_MMIO
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
-	unsigned hugepage_shift;
 	struct iowa_bus *bus;
 	int token;
 
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 		bus = &iowa_busses[token - 1];
 	else {
 		unsigned long vaddr, paddr;
-		pte_t *ptep;
 
 		vaddr = (unsigned long)PCI_FIX_ADDR(addr);
 		if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
 			return NULL;
-		/*
-		 * We won't find huge pages here (iomem). Also can't hit
-		 * a page table free due to init_mm
-		 */
-		ptep = find_init_mm_pte(vaddr, &hugepage_shift);
-		if (ptep == NULL)
-			paddr = 0;
-		else {
-			WARN_ON(hugepage_shift);
-			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
-		}
+
+		paddr = ppc_find_vmap_phys(vaddr);
+
 		bus = iowa_pci_find(vaddr, paddr);
 
 		if (bus == NULL)

arch/powerpc/kvm/book3s_hv_rm_mmu.c

Lines changed: 2 additions & 13 deletions
@@ -23,20 +23,9 @@
 #include <asm/pte-walk.h>
 
 /* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
 {
-	unsigned long addr = (unsigned long) x;
-	pte_t *p;
-	/*
-	 * assume we don't have huge pages in vmalloc space...
-	 * So don't worry about THP collapse/split. Called
-	 * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
-	 */
-	p = find_init_mm_pte(addr, NULL);
-	if (!p || !pte_present(*p))
-		return NULL;
-	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
-	return __va(addr);
+	return __va(ppc_find_vmap_phys((unsigned long)addr));
 }
 
 /* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
