
Commit 35e4d7f

Marc Zyngier committed:

Merge branch kvm-arm64/at-fixes-6.16 into kvmarm-master/next

* kvm-arm64/at-fixes-6.16:
  : .
  : Set of fixes for Address Translation (AT) instruction emulation,
  : which affect the (not yet upstream) NV support.
  :
  : From the cover letter:
  :
  : "Here's a small series of fixes for KVM's implementation of address
  : translation (aka the AT S1* instructions), addressing a number of
  : issues in increasing levels of severity:
  :
  : - We misreport PAR_EL1.PTW on a number of occasions, including state
  :   that is not possible as per the architecture definition
  :
  : - We don't handle access faults at all, and that doesn't play very
  :   well with the rest of the VNCR stuff
  :
  : - AT S1E{0,1} from EL2 with HCR_EL2.{E2H,TGE}={1,1} will absolutely
  :   take the host down, no questions asked"
  : .
  KVM: arm64: Don't feed uninitialised data to HCR_EL2
  KVM: arm64: Teach address translation about access faults
  KVM: arm64: Fix PAR_EL1.{PTW,S} reporting on AT S1E*

Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents: fef3acf + 3e4d597


arch/arm64/kvm/at.c

Lines changed: 36 additions & 23 deletions
@@ -60,11 +60,11 @@ struct s1_walk_result {
 	bool failed;
 };
 
-static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool ptw, bool s2)
+static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
 {
 	wr->fst = fst;
-	wr->ptw = ptw;
-	wr->s2 = s2;
+	wr->ptw = s1ptw;
+	wr->s2 = s1ptw;
 	wr->failed = true;
 }
 
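An aside on why fail_s1_walk() can collapse its ptw/s2 parameters into a
single s1ptw flag: architecturally, PAR_EL1.PTW only describes a stage-2
fault taken during a stage-1 table walk, so it is only meaningful when
PAR_EL1.S reports a stage-2 fault. The old pair of independent booleans
could encode states the architecture does not allow. A minimal sketch of
the invariant the new signature enforces, assuming the SYS_PAR_EL1_*
masks from arch/arm64/include/asm/sysreg.h (illustration only, not part
of the patch):

/* PTW set while S reports a stage-1 fault is an impossible state */
static bool par_ptw_s_consistent(u64 par)
{
	bool ptw = par & SYS_PAR_EL1_PTW;	/* fault on S1 walk, taken at S2 */
	bool s2  = par & SYS_PAR_EL1_S;		/* faulting stage: 1 means S2 */

	return !ptw || s2;	/* PTW implies a stage-2 fault */
}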
@@ -345,11 +345,11 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
 	return 0;
 
 addrsz:			/* Address Size Fault level 0 */
-	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false, false);
+	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false);
 	return -EFAULT;
 
 transfault_l0:		/* Translation Fault level 0 */
-	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false, false);
+	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false);
 	return -EFAULT;
 }
 
@@ -380,13 +380,13 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 			if (ret) {
 				fail_s1_walk(wr,
 					     (s2_trans.esr & ~ESR_ELx_FSC_LEVEL) | level,
-					     true, true);
+					     true);
 				return ret;
 			}
 
 			if (!kvm_s2_trans_readable(&s2_trans)) {
 				fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level),
-					     true, true);
+					     true);
 
 				return -EPERM;
 			}
@@ -396,8 +396,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 
 		ret = kvm_read_guest(vcpu->kvm, ipa, &desc, sizeof(desc));
 		if (ret) {
-			fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level),
-				     true, false);
+			fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level), false);
 			return ret;
 		}
 
@@ -457,6 +456,11 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 	if (check_output_size(desc & GENMASK(47, va_bottom), wi))
 		goto addrsz;
 
+	if (!(desc & PTE_AF)) {
+		fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false);
+		return -EACCES;
+	}
+
 	va_bottom += contiguous_bit_shift(desc, wi, level);
 
 	wr->failed = false;
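The new PTE_AF check above reports an Access flag fault the way hardware
AT would, instead of letting the walk appear to succeed on a descriptor
whose AF bit is clear. The fault class lives in the FSC bits covered by
ESR_ELx_FSC_TYPE, with the walk level in the low bits; this is the
property par_check_s1_access_fault() relies on further down. A small
compile-time sketch of that relationship, assuming the ESR_ELx_FSC_*
definitions from asm/esr.h (illustration only, not part of the patch):

#include <linux/build_bug.h>
#include <asm/esr.h>

/* An access fault at any walk level still carries the ACCESS class */
static_assert((ESR_ELx_FSC_ACCESS_L(3) & ESR_ELx_FSC_TYPE) ==
	      ESR_ELx_FSC_ACCESS);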
@@ -468,10 +472,10 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 	return 0;
 
 addrsz:
-	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), true, false);
+	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), false);
 	return -EINVAL;
 transfault:
-	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), true, false);
+	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), false);
 	return -ENOENT;
 }
 
@@ -488,7 +492,6 @@ struct mmu_config {
 	u64	sctlr;
 	u64	vttbr;
 	u64	vtcr;
-	u64	hcr;
 };
 
 static void __mmu_config_save(struct mmu_config *config)
@@ -511,13 +514,10 @@ static void __mmu_config_save(struct mmu_config *config)
 	config->sctlr	= read_sysreg_el1(SYS_SCTLR);
 	config->vttbr	= read_sysreg(vttbr_el2);
 	config->vtcr	= read_sysreg(vtcr_el2);
-	config->hcr	= read_sysreg(hcr_el2);
 }
 
 static void __mmu_config_restore(struct mmu_config *config)
 {
-	write_sysreg(config->hcr,	hcr_el2);
-
 	/*
 	 * ARM errata 1165522 and 1530923 require TGE to be 1 before
 	 * we update the guest state.
@@ -1198,7 +1198,7 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 	}
 
 	if (perm_fail)
-		fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false, false);
+		fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false);
 
 compute_par:
 	return compute_par_s1(vcpu, &wr, wi.regime);
@@ -1210,7 +1210,8 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
  * If the translation is unsuccessful, the value may only contain
  * PAR_EL1.F, and cannot be taken at face value. It isn't an
  * indication of the translation having failed, only that the fast
- * path did not succeed, *unless* it indicates a S1 permission fault.
+ * path did not succeed, *unless* it indicates a S1 permission or
+ * access fault.
  */
 static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 {
@@ -1266,8 +1267,8 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 		__load_stage2(mmu, mmu->arch);
 
 skip_mmu_switch:
-	/* Clear TGE, enable S2 translation, we're rolling */
-	write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2);
+	/* Temporarily switch back to guest context */
+	write_sysreg(vcpu->arch.hcr_el2, hcr_el2);
 	isb();
 
 	switch (op) {
@@ -1299,6 +1300,8 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 	if (!fail)
 		par = read_sysreg_par();
 
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+
 	if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
 		__mmu_config_restore(&config);
 
@@ -1313,19 +1316,29 @@ static bool par_check_s1_perm_fault(u64 par)
 		!(par & SYS_PAR_EL1_S));
 }
 
+static bool par_check_s1_access_fault(u64 par)
+{
+	u8 fst = FIELD_GET(SYS_PAR_EL1_FST, par);
+
+	return  ((fst & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS &&
+		 !(par & SYS_PAR_EL1_S));
+}
+
 void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 {
 	u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr);
 
 	/*
-	 * If PAR_EL1 reports that AT failed on a S1 permission fault, we
-	 * know for sure that the PTW was able to walk the S1 tables and
-	 * there's nothing else to do.
+	 * If PAR_EL1 reports that AT failed on a S1 permission or access
+	 * fault, we know for sure that the PTW was able to walk the S1
+	 * tables and there's nothing else to do.
 	 *
 	 * If AT failed for any other reason, then we must walk the guest S1
 	 * to emulate the instruction.
 	 */
-	if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par))
+	if ((par & SYS_PAR_EL1_F) &&
+	    !par_check_s1_perm_fault(par) &&
+	    !par_check_s1_access_fault(par))
 		par = handle_at_slow(vcpu, op, vaddr);
 
 	vcpu_write_sys_reg(vcpu, par, PAR_EL1);
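Taken together, the mmu_config and HCR_EL2 hunks above fix the
"uninitialised data" bug from the cover letter: with
HCR_EL2.{E2H,TGE}={1,1}, the fast path branches straight to
skip_mmu_switch without ever calling __mmu_config_save(), yet the old
code then fed config.hcr (stack garbage) to HCR_EL2. The fix writes the
vcpu's known-good hcr_el2 value instead, and unconditionally restores
HCR_HOST_VHE_FLAGS once the AT instruction has executed. A stripped-down
sketch of the bug pattern, with hypothetical helpers standing in for the
real sysreg accessors (illustration only, not the kernel's API):

/* Hypothetical helpers, for illustration only */
extern unsigned long read_hcr(void);
extern void write_hcr(unsigned long val);

static void at_fast_path_buggy(bool e2h_tge)
{
	unsigned long saved_hcr;	/* uninitialised stack slot */

	if (!e2h_tge)
		saved_hcr = read_hcr();	/* only initialised on this path */

	/* skip_mmu_switch: with E2H+TGE set, the save above never ran */
	write_hcr(saved_hcr);		/* consumes stack garbage */
}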
