@@ -60,11 +60,11 @@ struct s1_walk_result {
 	bool	failed;
 };
 
-static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool ptw, bool s2)
+static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
 {
 	wr->fst		= fst;
-	wr->ptw		= ptw;
-	wr->s2		= s2;
+	wr->ptw		= s1ptw;
+	wr->s2		= s1ptw;
 	wr->failed	= true;
 }
 
@@ -345,11 +345,11 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
 	return 0;
 
 addrsz:				/* Address Size Fault level 0 */
-	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false, false);
+	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false);
 	return -EFAULT;
 
 transfault_l0:			/* Translation Fault level 0 */
-	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false, false);
+	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false);
 	return -EFAULT;
 }
 
@@ -380,13 +380,13 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 		if (ret) {
 			fail_s1_walk(wr,
 				     (s2_trans.esr & ~ESR_ELx_FSC_LEVEL) | level,
-				     true, true);
+				     true);
 			return ret;
 		}
 
 		if (!kvm_s2_trans_readable(&s2_trans)) {
 			fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level),
-				     true, true);
+				     true);
 
 			return -EPERM;
 		}
@@ -396,8 +396,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 
 		ret = kvm_read_guest(vcpu->kvm, ipa, &desc, sizeof(desc));
 		if (ret) {
-			fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level),
-				     true, false);
+			fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level), false);
 			return ret;
 		}
 
@@ -457,6 +456,11 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 	if (check_output_size(desc & GENMASK(47, va_bottom), wi))
 		goto addrsz;
 
+	if (!(desc & PTE_AF)) {
+		fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false);
+		return -EACCES;
+	}
+
 	va_bottom += contiguous_bit_shift(desc, wi, level);
 
 	wr->failed = false;
@@ -468,10 +472,10 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 	return 0;
 
 addrsz:
-	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), true, false);
+	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), false);
 	return -EINVAL;
 transfault:
-	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), true, false);
+	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), false);
 	return -ENOENT;
 }
 
@@ -488,7 +492,6 @@ struct mmu_config {
 	u64	sctlr;
 	u64	vttbr;
 	u64	vtcr;
-	u64	hcr;
 };
 
 static void __mmu_config_save(struct mmu_config *config)
@@ -511,13 +514,10 @@ static void __mmu_config_save(struct mmu_config *config)
 	config->sctlr	= read_sysreg_el1(SYS_SCTLR);
 	config->vttbr	= read_sysreg(vttbr_el2);
 	config->vtcr	= read_sysreg(vtcr_el2);
-	config->hcr	= read_sysreg(hcr_el2);
 }
 
 static void __mmu_config_restore(struct mmu_config *config)
 {
-	write_sysreg(config->hcr, hcr_el2);
-
 	/*
 	 * ARM errata 1165522 and 1530923 require TGE to be 1 before
 	 * we update the guest state.
@@ -1198,7 +1198,7 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 	}
 
 	if (perm_fail)
-		fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false, false);
+		fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false);
 
 compute_par:
 	return compute_par_s1(vcpu, &wr, wi.regime);
@@ -1210,7 +1210,8 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
  * If the translation is unsuccessful, the value may only contain
  * PAR_EL1.F, and cannot be taken at face value. It isn't an
  * indication of the translation having failed, only that the fast
- * path did not succeed, *unless* it indicates a S1 permission fault.
+ * path did not succeed, *unless* it indicates a S1 permission or
+ * access fault.
  */
 static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 {
@@ -1266,8 +1267,8 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 		__load_stage2(mmu, mmu->arch);
 
 skip_mmu_switch:
-	/* Clear TGE, enable S2 translation, we're rolling */
-	write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2);
+	/* Temporarily switch back to guest context */
+	write_sysreg(vcpu->arch.hcr_el2, hcr_el2);
 	isb();
 
 	switch (op) {
@@ -1299,6 +1300,8 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 	if (!fail)
 		par = read_sysreg_par();
 
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+
 	if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
 		__mmu_config_restore(&config);
 
@@ -1313,19 +1316,29 @@ static bool par_check_s1_perm_fault(u64 par)
 		!(par & SYS_PAR_EL1_S));
 }
 
+static bool par_check_s1_access_fault(u64 par)
+{
+	u8 fst = FIELD_GET(SYS_PAR_EL1_FST, par);
+
+	return ((fst & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS &&
+		!(par & SYS_PAR_EL1_S));
+}
+
 void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 {
 	u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr);
 
 	/*
-	 * If PAR_EL1 reports that AT failed on a S1 permission fault, we
-	 * know for sure that the PTW was able to walk the S1 tables and
-	 * there's nothing else to do.
+	 * If PAR_EL1 reports that AT failed on a S1 permission or access
+	 * fault, we know for sure that the PTW was able to walk the S1
+	 * tables and there's nothing else to do.
 	 *
 	 * If AT failed for any other reason, then we must walk the guest S1
 	 * to emulate the instruction.
 	 */
-	if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par))
+	if ((par & SYS_PAR_EL1_F) &&
+	    !par_check_s1_perm_fault(par) &&
+	    !par_check_s1_access_fault(par))
 		par = handle_at_slow(vcpu, op, vaddr);
 
 	vcpu_write_sys_reg(vcpu, par, PAR_EL1);
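
A minimal standalone sketch of the PAR_EL1 classification the fast path relies on above. It assumes simplified bit definitions (PAR_F, PAR_S, an FST field at bits [6:1], and the fault-status class values) instead of the kernel's sysreg/esr headers, and mirrors what par_check_s1_perm_fault()/par_check_s1_access_fault() test for; it is not the kernel implementation itself.

/* Sketch only: bit layout hard-coded as an assumption, not taken from the kernel headers. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAR_F		(1ULL << 0)	/* AT reported a failed translation */
#define PAR_S		(1ULL << 9)	/* fault came from stage 2, not stage 1 */
#define PAR_FST(par)	(((par) >> 1) & 0x3f)	/* fault status code, bits [6:1] */

#define FSC_TYPE	0x3c		/* fault class with the level bits stripped */
#define FSC_ACCESS	0x08		/* access flag fault class */
#define FSC_PERM	0x0c		/* permission fault class */

/* Stage-1 permission fault: AT walked the tables, so no software walk is needed. */
static bool s1_perm_fault(uint64_t par)
{
	return (PAR_FST(par) & FSC_TYPE) == FSC_PERM && !(par & PAR_S);
}

/* Stage-1 access flag fault: likewise proves the walk itself succeeded. */
static bool s1_access_fault(uint64_t par)
{
	return (PAR_FST(par) & FSC_TYPE) == FSC_ACCESS && !(par & PAR_S);
}

int main(void)
{
	/* Example PAR: F set, FST = access flag fault at level 3, stage-1 fault. */
	uint64_t par = PAR_F | ((uint64_t)(FSC_ACCESS | 3) << 1);

	/* Either of these being true lets the fast path skip the slow S1 walk. */
	printf("perm=%d access=%d\n", s1_perm_fault(par), s1_access_fault(par));
	return 0;
}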