Skip to content

Commit c478032

Browse files
committed
Merge tag 'kvmarm-fixes-6.15-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64: First batch of fixes for 6.15

- Rework heuristics for resolving the fault IPA (HPFAR_EL2 v. re-walk of the stage-1 page tables) to align with the architecture. This avoids possibly taking an SEA at EL2 on the page table walk or using an architecturally UNKNOWN fault IPA.

- Use acquire/release semantics in the KVM FF-A proxy to avoid reading a stale value for the FF-A version.

- Fix the KVM guest driver to match the PV CPUID hypercall ABI.

- Use Inner Shareable Normal Write-Back mappings at stage-1 in KVM selftests, which is the only memory type for which atomic instructions are architecturally guaranteed to work.
2 parents c77eee5 + a344e25 commit c478032

File tree

12 files changed

+234
-78
lines changed

12 files changed

+234
-78
lines changed

arch/arm64/include/asm/esr.h

Lines changed: 42 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,15 @@
121121
#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
122122
#define ESR_ELx_FSC_SECC (0x18)
123123
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
124+
#define ESR_ELx_FSC_ADDRSZ (0x00)
125+
126+
/*
127+
* Annoyingly, the negative levels for Address size faults aren't laid out
128+
* contiguously (or in the desired order)
129+
*/
130+
#define ESR_ELx_FSC_ADDRSZ_nL(n) ((n) == -1 ? 0x25 : 0x2C)
131+
#define ESR_ELx_FSC_ADDRSZ_L(n) ((n) < 0 ? ESR_ELx_FSC_ADDRSZ_nL(n) : \
132+
(ESR_ELx_FSC_ADDRSZ + (n)))
124133

125134
/* Status codes for individual page table levels */
126135
#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + (n))
@@ -161,8 +170,6 @@
161170
#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0))
162171

163172
/* ISS field definitions for exceptions taken in to Hyp */
164-
#define ESR_ELx_FSC_ADDRSZ (0x00)
165-
#define ESR_ELx_FSC_ADDRSZ_L(n) (ESR_ELx_FSC_ADDRSZ + (n))
166173
#define ESR_ELx_CV (UL(1) << 24)
167174
#define ESR_ELx_COND_SHIFT (20)
168175
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
@@ -464,6 +471,39 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
464471
(esr == ESR_ELx_FSC_ACCESS_L(0));
465472
}
466473

474+
static inline bool esr_fsc_is_addr_sz_fault(unsigned long esr)
475+
{
476+
esr &= ESR_ELx_FSC;
477+
478+
return (esr == ESR_ELx_FSC_ADDRSZ_L(3)) ||
479+
(esr == ESR_ELx_FSC_ADDRSZ_L(2)) ||
480+
(esr == ESR_ELx_FSC_ADDRSZ_L(1)) ||
481+
(esr == ESR_ELx_FSC_ADDRSZ_L(0)) ||
482+
(esr == ESR_ELx_FSC_ADDRSZ_L(-1));
483+
}
484+
485+
static inline bool esr_fsc_is_sea_ttw(unsigned long esr)
486+
{
487+
esr = esr & ESR_ELx_FSC;
488+
489+
return (esr == ESR_ELx_FSC_SEA_TTW(3)) ||
490+
(esr == ESR_ELx_FSC_SEA_TTW(2)) ||
491+
(esr == ESR_ELx_FSC_SEA_TTW(1)) ||
492+
(esr == ESR_ELx_FSC_SEA_TTW(0)) ||
493+
(esr == ESR_ELx_FSC_SEA_TTW(-1));
494+
}
495+
496+
static inline bool esr_fsc_is_secc_ttw(unsigned long esr)
497+
{
498+
esr = esr & ESR_ELx_FSC;
499+
500+
return (esr == ESR_ELx_FSC_SECC_TTW(3)) ||
501+
(esr == ESR_ELx_FSC_SECC_TTW(2)) ||
502+
(esr == ESR_ELx_FSC_SECC_TTW(1)) ||
503+
(esr == ESR_ELx_FSC_SECC_TTW(0)) ||
504+
(esr == ESR_ELx_FSC_SECC_TTW(-1));
505+
}
506+
467507
/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
468508
static inline bool esr_iss_is_eretax(unsigned long esr)
469509
{

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -305,7 +305,12 @@ static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vc
305305

306306
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
307307
{
308-
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
308+
u64 hpfar = vcpu->arch.fault.hpfar_el2;
309+
310+
if (unlikely(!(hpfar & HPFAR_EL2_NS)))
311+
return INVALID_GPA;
312+
313+
return FIELD_GET(HPFAR_EL2_FIPA, hpfar) << 12;
309314
}
310315

311316
static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)

arch/arm64/include/asm/kvm_ras.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
* Was this synchronous external abort a RAS notification?
1515
* Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
1616
*/
17-
static inline int kvm_handle_guest_sea(phys_addr_t addr, u64 esr)
17+
static inline int kvm_handle_guest_sea(void)
1818
{
1919
/* apei_claim_sea(NULL) expects to mask interrupts itself */
2020
lockdep_assert_irqs_enabled();

arch/arm64/kvm/hyp/include/hyp/fault.h

Lines changed: 48 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,16 @@
1212
#include <asm/kvm_hyp.h>
1313
#include <asm/kvm_mmu.h>
1414

15+
static inline bool __fault_safe_to_translate(u64 esr)
16+
{
17+
u64 fsc = esr & ESR_ELx_FSC;
18+
19+
if (esr_fsc_is_sea_ttw(esr) || esr_fsc_is_secc_ttw(esr))
20+
return false;
21+
22+
return !(fsc == ESR_ELx_FSC_EXTABT && (esr & ESR_ELx_FnV));
23+
}
24+
1525
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
1626
{
1727
int ret;
@@ -44,34 +54,50 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
4454
return true;
4555
}
4656

47-
static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
57+
/*
58+
* Checks for the conditions when HPFAR_EL2 is written, per ARM ARM R_FKLWR.
59+
*/
60+
static inline bool __hpfar_valid(u64 esr)
4861
{
49-
u64 hpfar, far;
50-
51-
far = read_sysreg_el2(SYS_FAR);
52-
5362
/*
54-
* The HPFAR can be invalid if the stage 2 fault did not
55-
* happen during a stage 1 page table walk (the ESR_EL2.S1PTW
56-
* bit is clear) and one of the two following cases are true:
57-
* 1. The fault was due to a permission fault
58-
* 2. The processor carries errata 834220
63+
* CPUs affected by ARM erratum #834220 may incorrectly report a
64+
* stage-2 translation fault when a stage-1 permission fault occurs.
5965
*
60-
* Therefore, for all non S1PTW faults where we either have a
61-
* permission fault or the errata workaround is enabled, we
62-
* resolve the IPA using the AT instruction.
66+
* Re-walk the page tables to determine if a stage-1 fault actually
67+
* occurred.
6368
*/
64-
if (!(esr & ESR_ELx_S1PTW) &&
65-
(cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
66-
esr_fsc_is_permission_fault(esr))) {
67-
if (!__translate_far_to_hpfar(far, &hpfar))
68-
return false;
69-
} else {
69+
if (cpus_have_final_cap(ARM64_WORKAROUND_834220) &&
70+
esr_fsc_is_translation_fault(esr))
71+
return false;
72+
73+
if (esr_fsc_is_translation_fault(esr) || esr_fsc_is_access_flag_fault(esr))
74+
return true;
75+
76+
if ((esr & ESR_ELx_S1PTW) && esr_fsc_is_permission_fault(esr))
77+
return true;
78+
79+
return esr_fsc_is_addr_sz_fault(esr);
80+
}
81+
82+
static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
83+
{
84+
u64 hpfar;
85+
86+
fault->far_el2 = read_sysreg_el2(SYS_FAR);
87+
fault->hpfar_el2 = 0;
88+
89+
if (__hpfar_valid(esr))
7090
hpfar = read_sysreg(hpfar_el2);
71-
}
91+
else if (unlikely(!__fault_safe_to_translate(esr)))
92+
return true;
93+
else if (!__translate_far_to_hpfar(fault->far_el2, &hpfar))
94+
return false;
7295

73-
fault->far_el2 = far;
74-
fault->hpfar_el2 = hpfar;
96+
/*
97+
* Hijack HPFAR_EL2.NS (RES0 in Non-secure) to indicate a valid
98+
* HPFAR value.
99+
*/
100+
fault->hpfar_el2 = hpfar | HPFAR_EL2_NS;
75101
return true;
76102
}
77103

arch/arm64/kvm/hyp/nvhe/ffa.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -730,10 +730,10 @@ static void do_ffa_version(struct arm_smccc_res *res,
730730
hyp_ffa_version = ffa_req_version;
731731
}
732732

733-
if (hyp_ffa_post_init())
733+
if (hyp_ffa_post_init()) {
734734
res->a0 = FFA_RET_NOT_SUPPORTED;
735-
else {
736-
has_version_negotiated = true;
735+
} else {
736+
smp_store_release(&has_version_negotiated, true);
737737
res->a0 = hyp_ffa_version;
738738
}
739739
unlock:
@@ -809,7 +809,8 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
809809
if (!is_ffa_call(func_id))
810810
return false;
811811

812-
if (!has_version_negotiated && func_id != FFA_VERSION) {
812+
if (func_id != FFA_VERSION &&
813+
!smp_load_acquire(&has_version_negotiated)) {
813814
ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
814815
goto out_handled;
815816
}

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -578,7 +578,14 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
578578
return;
579579
}
580580

581-
addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
581+
582+
/*
583+
* Yikes, we couldn't resolve the fault IPA. This should reinject an
584+
* abort into the host when we figure out how to do that.
585+
*/
586+
BUG_ON(!(fault.hpfar_el2 & HPFAR_EL2_NS));
587+
addr = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;
588+
582589
ret = host_stage2_idmap(addr);
583590
BUG_ON(ret && ret != -EAGAIN);
584591
}

arch/arm64/kvm/mmu.c

Lines changed: 19 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1794,9 +1794,28 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
17941794
gfn_t gfn;
17951795
int ret, idx;
17961796

1797+
/* Synchronous External Abort? */
1798+
if (kvm_vcpu_abt_issea(vcpu)) {
1799+
/*
1800+
* For RAS the host kernel may handle this abort.
1801+
* There is no need to pass the error into the guest.
1802+
*/
1803+
if (kvm_handle_guest_sea())
1804+
kvm_inject_vabt(vcpu);
1805+
1806+
return 1;
1807+
}
1808+
17971809
esr = kvm_vcpu_get_esr(vcpu);
17981810

1811+
/*
1812+
* The fault IPA should be reliable at this point as we're not dealing
1813+
* with an SEA.
1814+
*/
17991815
ipa = fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1816+
if (KVM_BUG_ON(ipa == INVALID_GPA, vcpu->kvm))
1817+
return -EFAULT;
1818+
18001819
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
18011820

18021821
if (esr_fsc_is_translation_fault(esr)) {
@@ -1818,18 +1837,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
18181837
}
18191838
}
18201839

1821-
/* Synchronous External Abort? */
1822-
if (kvm_vcpu_abt_issea(vcpu)) {
1823-
/*
1824-
* For RAS the host kernel may handle this abort.
1825-
* There is no need to pass the error into the guest.
1826-
*/
1827-
if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
1828-
kvm_inject_vabt(vcpu);
1829-
1830-
return 1;
1831-
}
1832-
18331840
trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
18341841
kvm_vcpu_get_hfar(vcpu), fault_ipa);
18351842

arch/arm64/tools/sysreg

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3433,3 +3433,10 @@ Field 5 F
34333433
Field 4 P
34343434
Field 3:0 Align
34353435
EndSysreg
3436+
3437+
Sysreg HPFAR_EL2 3 4 6 0 4
3438+
Field 63 NS
3439+
Res0 62:48
3440+
Field 47:4 FIPA
3441+
Res0 3:0
3442+
EndSysreg

drivers/firmware/smccc/kvm_guest.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -95,15 +95,15 @@ void __init kvm_arm_target_impl_cpu_init(void)
9595

9696
for (i = 0; i < max_cpus; i++) {
9797
arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
98-
i, &res);
98+
i, 0, 0, &res);
9999
if (res.a0 != SMCCC_RET_SUCCESS) {
100100
pr_warn("Discovering target implementation CPUs failed\n");
101101
goto mem_free;
102102
}
103103
target[i].midr = res.a1;
104104
target[i].revidr = res.a2;
105105
target[i].aidr = res.a3;
106-
};
106+
}
107107

108108
if (!cpu_errata_set_target_impl(max_cpus, target)) {
109109
pr_warn("Failed to set target implementation CPUs\n");

tools/testing/selftests/kvm/arm64/page_fault_test.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,7 @@ static bool guest_set_ha(void)
199199
if (hadbs == 0)
200200
return false;
201201

202-
tcr = read_sysreg(tcr_el1) | TCR_EL1_HA;
202+
tcr = read_sysreg(tcr_el1) | TCR_HA;
203203
write_sysreg(tcr, tcr_el1);
204204
isb();
205205

0 commit comments

Comments (0)