Skip to content

Commit bd7b12a

Browse files
committed
Merge tag 'powerpc-5.13-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman: "Fix our KVM reverse map real-mode handling since we enabled huge vmalloc (in some configurations). Revert a recent change to our IOMMU code which broke some devices. Fix KVM handling of FSCR on P7/P8, which could have possibly let a guest crash its Qemu. Fix kprobes validation of prefixed instructions across page boundary. Thanks to Alexey Kardashevskiy, Christophe Leroy, Fabiano Rosas, Frederic Barrat, Naveen N. Rao, and Nicholas Piggin" * tag 'powerpc-5.13-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: Revert "powerpc/kernel/iommu: Align size for IOMMU_PAGE_SIZE() to save TCEs" KVM: PPC: Book3S HV: Save host FSCR in the P7/8 path powerpc: Fix reverse map real-mode address lookup with huge vmalloc powerpc/kprobes: Fix validation of prefixed instructions across page boundary
2 parents 773ac53 + 59cc84c commit bd7b12a

File tree

8 files changed

+49
-57
lines changed

8 files changed

+49
-57
lines changed

arch/powerpc/include/asm/pte-walk.h

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
3131
pgd_t *pgdir = init_mm.pgd;
3232
return __find_linux_pte(pgdir, ea, NULL, hshift);
3333
}
34+
35+
/*
36+
* Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
37+
* physical address, without taking locks. This can be used in real-mode.
38+
*/
39+
static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
40+
{
41+
pte_t *ptep;
42+
phys_addr_t pa;
43+
int hugepage_shift;
44+
45+
/*
46+
* init_mm does not free page tables, and does not do THP. It may
47+
* have huge pages from huge vmalloc / ioremap etc.
48+
*/
49+
ptep = find_init_mm_pte(addr, &hugepage_shift);
50+
if (WARN_ON(!ptep))
51+
return 0;
52+
53+
pa = PFN_PHYS(pte_pfn(*ptep));
54+
55+
if (!hugepage_shift)
56+
hugepage_shift = PAGE_SHIFT;
57+
58+
pa |= addr & ((1ul << hugepage_shift) - 1);
59+
60+
return pa;
61+
}
62+
3463
/*
3564
* This is what we should always use. Any other lockless page table lookup needs
3665
* careful audit against THP split.

arch/powerpc/kernel/eeh.c

Lines changed: 1 addition & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
346346
*/
347347
/* Translate an EEH token (a kernel vmap virtual address) to its physical
 * address via the lockless init_mm page-table walk. */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	unsigned long pa = ppc_find_vmap_phys(token);

	return pa;
}
372351

373352
/*

arch/powerpc/kernel/io-workarounds.c

Lines changed: 3 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
5555
#ifdef CONFIG_PPC_INDIRECT_MMIO
5656
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
5757
{
58-
unsigned hugepage_shift;
5958
struct iowa_bus *bus;
6059
int token;
6160

@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
6564
bus = &iowa_busses[token - 1];
6665
else {
6766
unsigned long vaddr, paddr;
68-
pte_t *ptep;
6967

7068
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
7169
if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
7270
return NULL;
73-
/*
74-
* We won't find huge pages here (iomem). Also can't hit
75-
* a page table free due to init_mm
76-
*/
77-
ptep = find_init_mm_pte(vaddr, &hugepage_shift);
78-
if (ptep == NULL)
79-
paddr = 0;
80-
else {
81-
WARN_ON(hugepage_shift);
82-
paddr = pte_pfn(*ptep) << PAGE_SHIFT;
83-
}
71+
72+
paddr = ppc_find_vmap_phys(vaddr);
73+
8474
bus = iowa_pci_find(vaddr, paddr);
8575

8676
if (bus == NULL)

arch/powerpc/kernel/iommu.c

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
898898
unsigned int order;
899899
unsigned int nio_pages, io_order;
900900
struct page *page;
901-
size_t size_io = size;
902901

903902
size = PAGE_ALIGN(size);
904903
order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
925924
memset(ret, 0, size);
926925

927926
/* Set up tces to cover the allocated range */
928-
size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
929-
nio_pages = size_io >> tbl->it_page_shift;
930-
io_order = get_iommu_order(size_io, tbl);
927+
nio_pages = size >> tbl->it_page_shift;
928+
io_order = get_iommu_order(size, tbl);
931929
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
932930
mask >> tbl->it_page_shift, io_order, 0);
933931
if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
942940
void *vaddr, dma_addr_t dma_handle)
943941
{
944942
if (tbl) {
945-
size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
946-
unsigned int nio_pages = size_io >> tbl->it_page_shift;
943+
unsigned int nio_pages;
947944

945+
size = PAGE_ALIGN(size);
946+
nio_pages = size >> tbl->it_page_shift;
948947
iommu_free(tbl, dma_handle, nio_pages);
949948
size = PAGE_ALIGN(size);
950949
free_pages((unsigned long)vaddr, get_order(size));

arch/powerpc/kernel/kprobes.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,15 +108,15 @@ int arch_prepare_kprobe(struct kprobe *p)
108108
int ret = 0;
109109
struct kprobe *prev;
110110
struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
111-
struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
112111

113112
if ((unsigned long)p->addr & 0x03) {
114113
printk("Attempt to register kprobe at an unaligned address\n");
115114
ret = -EINVAL;
116115
} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
117116
printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
118117
ret = -EINVAL;
119-
} else if (ppc_inst_prefixed(prefix)) {
118+
} else if ((unsigned long)p->addr & ~PAGE_MASK &&
119+
ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
120120
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
121121
ret = -EINVAL;
122122
}

arch/powerpc/kvm/book3s_hv.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4455,7 +4455,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
44554455
mtspr(SPRN_EBBRR, ebb_regs[1]);
44564456
mtspr(SPRN_BESCR, ebb_regs[2]);
44574457
mtspr(SPRN_TAR, user_tar);
4458-
mtspr(SPRN_FSCR, current->thread.fscr);
44594458
}
44604459
mtspr(SPRN_VRSAVE, user_vrsave);
44614460

arch/powerpc/kvm/book3s_hv_rm_mmu.c

Lines changed: 2 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -23,20 +23,9 @@
2323
#include <asm/pte-walk.h>
2424

2525
/* Translate address of a vmalloc'd thing to a linear map address */
26-
static void *real_vmalloc_addr(void *x)
26+
static void *real_vmalloc_addr(void *addr)
2727
{
28-
unsigned long addr = (unsigned long) x;
29-
pte_t *p;
30-
/*
31-
* assume we don't have huge pages in vmalloc space...
32-
* So don't worry about THP collapse/split. Called
33-
* Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
34-
*/
35-
p = find_init_mm_pte(addr, NULL);
36-
if (!p || !pte_present(*p))
37-
return NULL;
38-
addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
39-
return __va(addr);
28+
return __va(ppc_find_vmap_phys((unsigned long)addr));
4029
}
4130

4231
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */

arch/powerpc/kvm/book3s_hv_rmhandlers.S

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
5959
#define STACK_SLOT_UAMOR (SFS-88)
6060
#define STACK_SLOT_DAWR1 (SFS-96)
6161
#define STACK_SLOT_DAWRX1 (SFS-104)
62+
#define STACK_SLOT_FSCR (SFS-112)
6263
/* the following is used by the P9 short path */
6364
#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
6465

@@ -686,6 +687,8 @@ BEGIN_FTR_SECTION
686687
std r6, STACK_SLOT_DAWR0(r1)
687688
std r7, STACK_SLOT_DAWRX0(r1)
688689
std r8, STACK_SLOT_IAMR(r1)
690+
mfspr r5, SPRN_FSCR
691+
std r5, STACK_SLOT_FSCR(r1)
689692
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
690693
BEGIN_FTR_SECTION
691694
mfspr r6, SPRN_DAWR1
@@ -1663,6 +1666,10 @@ FTR_SECTION_ELSE
16631666
ld r7, STACK_SLOT_HFSCR(r1)
16641667
mtspr SPRN_HFSCR, r7
16651668
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1669+
BEGIN_FTR_SECTION
1670+
ld r5, STACK_SLOT_FSCR(r1)
1671+
mtspr SPRN_FSCR, r5
1672+
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
16661673
/*
16671674
* Restore various registers to 0, where non-zero values
16681675
* set by the guest could disrupt the host.

0 commit comments

Comments
 (0)