@@ -2493,6 +2493,93 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
2493
2493
return true;
2494
2494
}
2495
2495
2496
+ /*
2497
+ * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
2498
+ * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
2499
+ * The values made visible to userspace were the register values of the boot
2500
+ * CPU.
2501
+ *
2502
+ * At the same time, reads from these registers at EL1 previously were not
2503
+ * trapped, allowing the guest to read the actual hardware value. On big-little
2504
+ * machines, this means the VM can see different values depending on where a
2505
+ * given vCPU got scheduled.
2506
+ *
2507
+ * These registers are now trapped as collateral damage from SME, and what
2508
+ * follows attempts to give a user / guest view consistent with the existing
2509
+ * ABI.
2510
+ */
2511
+ static bool access_imp_id_reg (struct kvm_vcpu * vcpu ,
2512
+ struct sys_reg_params * p ,
2513
+ const struct sys_reg_desc * r )
2514
+ {
2515
+ if (p -> is_write )
2516
+ return write_to_read_only (vcpu , p , r );
2517
+
2518
+ switch (reg_to_encoding (r )) {
2519
+ case SYS_REVIDR_EL1 :
2520
+ p -> regval = read_sysreg (revidr_el1 );
2521
+ break ;
2522
+ case SYS_AIDR_EL1 :
2523
+ p -> regval = read_sysreg (aidr_el1 );
2524
+ break ;
2525
+ default :
2526
+ WARN_ON_ONCE (1 );
2527
+ }
2528
+
2529
+ return true;
2530
+ }
2531
+
2532
+ static u64 __ro_after_init boot_cpu_midr_val ;
2533
+ static u64 __ro_after_init boot_cpu_revidr_val ;
2534
+ static u64 __ro_after_init boot_cpu_aidr_val ;
2535
+
2536
+ static void init_imp_id_regs (void )
2537
+ {
2538
+ boot_cpu_midr_val = read_sysreg (midr_el1 );
2539
+ boot_cpu_revidr_val = read_sysreg (revidr_el1 );
2540
+ boot_cpu_aidr_val = read_sysreg (aidr_el1 );
2541
+ }
2542
+
2543
+ static int get_imp_id_reg (struct kvm_vcpu * vcpu , const struct sys_reg_desc * r ,
2544
+ u64 * val )
2545
+ {
2546
+ switch (reg_to_encoding (r )) {
2547
+ case SYS_MIDR_EL1 :
2548
+ * val = boot_cpu_midr_val ;
2549
+ break ;
2550
+ case SYS_REVIDR_EL1 :
2551
+ * val = boot_cpu_revidr_val ;
2552
+ break ;
2553
+ case SYS_AIDR_EL1 :
2554
+ * val = boot_cpu_aidr_val ;
2555
+ break ;
2556
+ default :
2557
+ WARN_ON_ONCE (1 );
2558
+ return - EINVAL ;
2559
+ }
2560
+
2561
+ return 0 ;
2562
+ }
2563
+
2564
+ static int set_imp_id_reg (struct kvm_vcpu * vcpu , const struct sys_reg_desc * r ,
2565
+ u64 val )
2566
+ {
2567
+ u64 expected ;
2568
+ int ret ;
2569
+
2570
+ ret = get_imp_id_reg (vcpu , r , & expected );
2571
+ if (ret )
2572
+ return ret ;
2573
+
2574
+ return (expected == val ) ? 0 : - EINVAL ;
2575
+ }
2576
+
2577
/*
 * Descriptor initializer for an implementation ID register
 * (MIDR/REVIDR/AIDR): guest reads are trapped to access_imp_id_reg(),
 * and the userspace view is pinned to the boot CPU's value via
 * get_imp_id_reg()/set_imp_id_reg().
 *
 * Note: the parameter list must follow the macro name with no intervening
 * space — "IMPLEMENTATION_ID (reg)" would define an object-like macro
 * whose expansion starts with "(reg)", breaking every use site.
 */
#define IMPLEMENTATION_ID(reg) {		\
	SYS_DESC(SYS_##reg),			\
	.access = access_imp_id_reg,		\
	.get_user = get_imp_id_reg,		\
	.set_user = set_imp_id_reg,		\
}
2496
2583
2497
2584
/*
2498
2585
* Architected system registers.
@@ -2542,7 +2629,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
2542
2629
2543
2630
{ SYS_DESC (SYS_DBGVCR32_EL2 ), undef_access , reset_val , DBGVCR32_EL2 , 0 },
2544
2631
2632
+ IMPLEMENTATION_ID (MIDR_EL1 ),
2545
2633
{ SYS_DESC (SYS_MPIDR_EL1 ), NULL , reset_mpidr , MPIDR_EL1 },
2634
+ IMPLEMENTATION_ID (REVIDR_EL1 ),
2546
2635
2547
2636
/*
2548
2637
* ID regs: all ID_SANITISED() entries here must have corresponding
@@ -2814,6 +2903,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
2814
2903
.set_user = set_clidr , .val = ~CLIDR_EL1_RES0 },
2815
2904
{ SYS_DESC (SYS_CCSIDR2_EL1 ), undef_access },
2816
2905
{ SYS_DESC (SYS_SMIDR_EL1 ), undef_access },
2906
+ IMPLEMENTATION_ID (AIDR_EL1 ),
2817
2907
{ SYS_DESC (SYS_CSSELR_EL1 ), access_csselr , reset_unknown , CSSELR_EL1 },
2818
2908
ID_FILTERED (CTR_EL0 , ctr_el0 ,
2819
2909
CTR_EL0_DIC_MASK |
@@ -4272,9 +4362,13 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
4272
4362
* Certain AArch32 ID registers are handled by rerouting to the AArch64
4273
4363
* system register table. Registers in the ID range where CRm=0 are
4274
4364
* excluded from this scheme as they do not trivially map into AArch64
4275
- * system register encodings.
4365
+ * system register encodings, except for AIDR/REVIDR .
4276
4366
*/
4277
- if (params .Op1 == 0 && params .CRn == 0 && params .CRm )
4367
+ if (params .Op1 == 0 && params .CRn == 0 &&
4368
+ (params .CRm || params .Op2 == 6 /* REVIDR */ ))
4369
+ return kvm_emulate_cp15_id_reg (vcpu , & params );
4370
+ if (params .Op1 == 1 && params .CRn == 0 &&
4371
+ params .CRm == 0 && params .Op2 == 7 /* AIDR */ )
4278
4372
return kvm_emulate_cp15_id_reg (vcpu , & params );
4279
4373
4280
4374
return kvm_handle_cp_32 (vcpu , & params , cp15_regs , ARRAY_SIZE (cp15_regs ));
@@ -4578,65 +4672,6 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
4578
4672
return r ;
4579
4673
}
4580
4674
4581
- /*
4582
- * These are the invariant sys_reg registers: we let the guest see the
4583
- * host versions of these, so they're part of the guest state.
4584
- *
4585
- * A future CPU may provide a mechanism to present different values to
4586
- * the guest, or a future kvm may trap them.
4587
- */
4588
-
4589
- #define FUNCTION_INVARIANT (reg ) \
4590
- static u64 reset_##reg(struct kvm_vcpu *v, \
4591
- const struct sys_reg_desc *r) \
4592
- { \
4593
- ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
4594
- return ((struct sys_reg_desc *)r)->val; \
4595
- }
4596
-
4597
- FUNCTION_INVARIANT (midr_el1 )
4598
- FUNCTION_INVARIANT (revidr_el1 )
4599
- FUNCTION_INVARIANT (aidr_el1 )
4600
-
4601
- /* ->val is filled in by kvm_sys_reg_table_init() */
4602
- static struct sys_reg_desc invariant_sys_regs [] __ro_after_init = {
4603
- { SYS_DESC (SYS_MIDR_EL1 ), NULL , reset_midr_el1 },
4604
- { SYS_DESC (SYS_REVIDR_EL1 ), NULL , reset_revidr_el1 },
4605
- { SYS_DESC (SYS_AIDR_EL1 ), NULL , reset_aidr_el1 },
4606
- };
4607
-
4608
- static int get_invariant_sys_reg (u64 id , u64 __user * uaddr )
4609
- {
4610
- const struct sys_reg_desc * r ;
4611
-
4612
- r = get_reg_by_id (id , invariant_sys_regs ,
4613
- ARRAY_SIZE (invariant_sys_regs ));
4614
- if (!r )
4615
- return - ENOENT ;
4616
-
4617
- return put_user (r -> val , uaddr );
4618
- }
4619
-
4620
- static int set_invariant_sys_reg (u64 id , u64 __user * uaddr )
4621
- {
4622
- const struct sys_reg_desc * r ;
4623
- u64 val ;
4624
-
4625
- r = get_reg_by_id (id , invariant_sys_regs ,
4626
- ARRAY_SIZE (invariant_sys_regs ));
4627
- if (!r )
4628
- return - ENOENT ;
4629
-
4630
- if (get_user (val , uaddr ))
4631
- return - EFAULT ;
4632
-
4633
- /* This is what we mean by invariant: you can't change it. */
4634
- if (r -> val != val )
4635
- return - EINVAL ;
4636
-
4637
- return 0 ;
4638
- }
4639
-
4640
4675
static int demux_c15_get (struct kvm_vcpu * vcpu , u64 id , void __user * uaddr )
4641
4676
{
4642
4677
u32 val ;
@@ -4718,15 +4753,10 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
4718
4753
int kvm_arm_sys_reg_get_reg (struct kvm_vcpu * vcpu , const struct kvm_one_reg * reg )
4719
4754
{
4720
4755
void __user * uaddr = (void __user * )(unsigned long )reg -> addr ;
4721
- int err ;
4722
4756
4723
4757
if ((reg -> id & KVM_REG_ARM_COPROC_MASK ) == KVM_REG_ARM_DEMUX )
4724
4758
return demux_c15_get (vcpu , reg -> id , uaddr );
4725
4759
4726
- err = get_invariant_sys_reg (reg -> id , uaddr );
4727
- if (err != - ENOENT )
4728
- return err ;
4729
-
4730
4760
return kvm_sys_reg_get_user (vcpu , reg ,
4731
4761
sys_reg_descs , ARRAY_SIZE (sys_reg_descs ));
4732
4762
}
@@ -4762,15 +4792,10 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
4762
4792
int kvm_arm_sys_reg_set_reg (struct kvm_vcpu * vcpu , const struct kvm_one_reg * reg )
4763
4793
{
4764
4794
void __user * uaddr = (void __user * )(unsigned long )reg -> addr ;
4765
- int err ;
4766
4795
4767
4796
if ((reg -> id & KVM_REG_ARM_COPROC_MASK ) == KVM_REG_ARM_DEMUX )
4768
4797
return demux_c15_set (vcpu , reg -> id , uaddr );
4769
4798
4770
- err = set_invariant_sys_reg (reg -> id , uaddr );
4771
- if (err != - ENOENT )
4772
- return err ;
4773
-
4774
4799
return kvm_sys_reg_set_user (vcpu , reg ,
4775
4800
sys_reg_descs , ARRAY_SIZE (sys_reg_descs ));
4776
4801
}
@@ -4859,23 +4884,14 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
4859
4884
4860
4885
unsigned long kvm_arm_num_sys_reg_descs (struct kvm_vcpu * vcpu )
4861
4886
{
4862
- return ARRAY_SIZE (invariant_sys_regs )
4863
- + num_demux_regs ()
4887
+ return num_demux_regs ()
4864
4888
+ walk_sys_regs (vcpu , (u64 __user * )NULL );
4865
4889
}
4866
4890
4867
4891
int kvm_arm_copy_sys_reg_indices (struct kvm_vcpu * vcpu , u64 __user * uindices )
4868
4892
{
4869
- unsigned int i ;
4870
4893
int err ;
4871
4894
4872
- /* Then give them all the invariant registers' indices. */
4873
- for (i = 0 ; i < ARRAY_SIZE (invariant_sys_regs ); i ++ ) {
4874
- if (put_user (sys_reg_to_index (& invariant_sys_regs [i ]), uindices ))
4875
- return - EFAULT ;
4876
- uindices ++ ;
4877
- }
4878
-
4879
4895
err = walk_sys_regs (vcpu , uindices );
4880
4896
if (err < 0 )
4881
4897
return err ;
@@ -5101,15 +5117,12 @@ int __init kvm_sys_reg_table_init(void)
5101
5117
valid &= check_sysreg_table (cp14_64_regs , ARRAY_SIZE (cp14_64_regs ), true);
5102
5118
valid &= check_sysreg_table (cp15_regs , ARRAY_SIZE (cp15_regs ), true);
5103
5119
valid &= check_sysreg_table (cp15_64_regs , ARRAY_SIZE (cp15_64_regs ), true);
5104
- valid &= check_sysreg_table (invariant_sys_regs , ARRAY_SIZE (invariant_sys_regs ), false);
5105
5120
valid &= check_sysreg_table (sys_insn_descs , ARRAY_SIZE (sys_insn_descs ), false);
5106
5121
5107
5122
if (!valid )
5108
5123
return - EINVAL ;
5109
5124
5110
- /* We abuse the reset function to overwrite the table itself. */
5111
- for (i = 0 ; i < ARRAY_SIZE (invariant_sys_regs ); i ++ )
5112
- invariant_sys_regs [i ].reset (NULL , & invariant_sys_regs [i ]);
5125
+ init_imp_id_regs ();
5113
5126
5114
5127
ret = populate_nv_trap_config ();
5115
5128
0 commit comments