
Commit 4f2774c

Merge branch 'kvm-arm64/writable-midr' into kvmarm/next
* kvm-arm64/writable-midr:
  : Writable implementation ID registers, courtesy of Sebastian Ott
  :
  : Introduce a new capability that allows userspace to set the
  : ID registers that identify a CPU implementation: MIDR_EL1, REVIDR_EL1,
  : and AIDR_EL1. Also plug a hole in KVM's trap configuration where
  : SMIDR_EL1 was readable at EL1, despite the fact that KVM does not
  : support SME.
  KVM: arm64: Fix documentation for KVM_CAP_ARM_WRITABLE_IMP_ID_REGS
  KVM: arm64: Copy MIDR_EL1 into hyp VM when it is writable
  KVM: arm64: Copy guest CTR_EL0 into hyp VM
  KVM: selftests: arm64: Test writes to MIDR,REVIDR,AIDR
  KVM: arm64: Allow userspace to change the implementation ID registers
  KVM: arm64: Load VPIDR_EL2 with the VM's MIDR_EL1 value
  KVM: arm64: Maintain per-VM copy of implementation ID regs
  KVM: arm64: Set HCR_EL2.TID1 unconditionally

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents 1b1d1b1 + 5980a69 commit 4f2774c

11 files changed, +244 -119 lines


Documentation/virt/kvm/api.rst

Lines changed: 18 additions & 0 deletions
@@ -8258,6 +8258,24 @@ KVM exits with the register state of either the L1 or L2 guest
 depending on which executed at the time of an exit. Userspace must
 take care to differentiate between these cases.
 
+7.37 KVM_CAP_ARM_WRITABLE_IMP_ID_REGS
+-------------------------------------
+
+:Architectures: arm64
+:Target: VM
+:Parameters: None
+:Returns: 0 on success, -EINVAL if vCPUs have been created before enabling this
+          capability.
+
+This capability changes the behavior of the registers that identify a PE
+implementation of the Arm architecture: MIDR_EL1, REVIDR_EL1, and AIDR_EL1.
+By default, these registers are visible to userspace but treated as invariant.
+
+When this capability is enabled, KVM allows userspace to change the
+aforementioned registers before the first KVM_RUN. These registers are VM
+scoped, meaning that the same set of values are presented on all vCPUs in a
+given VM.
+
 8. Other capabilities.
 ======================

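The documentation above describes the userspace flow in prose; the following is a
minimal, hypothetical sketch of that flow in C, not part of this commit: enable the
capability on the VM file descriptor before any vCPU exists, initialize a vCPU, then
write MIDR_EL1 through KVM_SET_ONE_REG before the first KVM_RUN. The MIDR value and
the lack of error handling are purely illustrative.

/* Hypothetical userspace sketch for KVM_CAP_ARM_WRITABLE_IMP_ID_REGS. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);

        /* Must be enabled before any vCPU is created, otherwise -EINVAL. */
        struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_WRITABLE_IMP_ID_REGS };
        ioctl(vm, KVM_ENABLE_CAP, &cap);

        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

        struct kvm_vcpu_init init;
        ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
        ioctl(vcpu, KVM_ARM_VCPU_INIT, &init);

        /* MIDR_EL1 is the system register op0=3, op1=0, CRn=0, CRm=0, op2=0. */
        __u64 midr = 0x410fd490;        /* arbitrary example value */
        struct kvm_one_reg reg = {
                .id   = ARM64_SYS_REG(3, 0, 0, 0, 0),
                .addr = (__u64)&midr,
        };
        /* Writes are only accepted before the first KVM_RUN. */
        ioctl(vcpu, KVM_SET_ONE_REG, &reg);

        return 0;
}

Because the registers are VM scoped, a value written through one vCPU's file
descriptor is what every vCPU in the VM reads back.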
arch/arm64/include/asm/kvm_arm.h

Lines changed: 2 additions & 2 deletions
@@ -92,12 +92,12 @@
  * SWIO:  Turn set/way invalidates into set/way clean+invalidate
  * PTW:   Take a stage2 fault if a stage1 walk steps in device memory
  * TID3:  Trap EL1 reads of group 3 ID registers
- * TID2:  Trap CTR_EL0, CCSIDR2_EL1, CLIDR_EL1, and CSSELR_EL1
+ * TID1:  Trap REVIDR_EL1, AIDR_EL1, and SMIDR_EL1
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
                          HCR_BSU_IS | HCR_FB | HCR_TACR | \
                          HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-                         HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
+                         HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

arch/arm64/include/asm/kvm_host.h

Lines changed: 11 additions & 0 deletions
@@ -336,6 +336,8 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_FGU_INITIALIZED           8
         /* SVE exposed to guest */
 #define KVM_ARCH_FLAG_GUEST_HAS_SVE             9
+        /* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
+#define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS      10
         unsigned long flags;
 
         /* VM-wide vCPU feature set */
@@ -375,6 +377,9 @@ struct kvm_arch {
 #define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
         u64 id_regs[KVM_ARM_ID_REG_NUM];
 
+        u64 midr_el1;
+        u64 revidr_el1;
+        u64 aidr_el1;
         u64 ctr_el0;
 
         /* Masks for VNCR-backed and general EL2 sysregs */
@@ -1489,6 +1494,12 @@ static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
                 return &ka->id_regs[IDREG_IDX(reg)];
         case SYS_CTR_EL0:
                 return &ka->ctr_el0;
+        case SYS_MIDR_EL1:
+                return &ka->midr_el1;
+        case SYS_REVIDR_EL1:
+                return &ka->revidr_el1;
+        case SYS_AIDR_EL1:
+                return &ka->aidr_el1;
         default:
                 WARN_ON_ONCE(1);
                 return NULL;

arch/arm64/kvm/arm.c

Lines changed: 9 additions & 0 deletions
@@ -125,6 +125,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                 }
                 mutex_unlock(&kvm->slots_lock);
                 break;
+        case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
+                mutex_lock(&kvm->lock);
+                if (!kvm->created_vcpus) {
+                        r = 0;
+                        set_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags);
+                }
+                mutex_unlock(&kvm->lock);
+                break;
         default:
                 break;
         }
@@ -313,6 +321,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         case KVM_CAP_ARM_SYSTEM_SUSPEND:
         case KVM_CAP_IRQFD_RESAMPLE:
         case KVM_CAP_COUNTER_OFFSET:
+        case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
                 r = 1;
                 break;
         case KVM_CAP_SET_GUEST_DEBUG2:

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

Lines changed: 13 additions & 1 deletion
@@ -43,6 +43,17 @@ static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
         return &ctxt_sys_reg(ctxt, MDSCR_EL1);
 }
 
+static inline u64 ctxt_midr_el1(struct kvm_cpu_context *ctxt)
+{
+        struct kvm *kvm = kern_hyp_va(ctxt_to_vcpu(ctxt)->kvm);
+
+        if (!(ctxt_is_guest(ctxt) &&
+              test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)))
+                return read_cpuid_id();
+
+        return kvm_read_vm_id_reg(kvm, SYS_MIDR_EL1);
+}
+
 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
         *ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1);
@@ -168,8 +179,9 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 }
 
 static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt,
-                                              u64 mpidr)
+                                              u64 midr, u64 mpidr)
 {
+        write_sysreg(midr, vpidr_el2);
         write_sysreg(mpidr, vmpidr_el2);
 
         if (has_vhe() ||

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 9 additions & 1 deletion
@@ -46,7 +46,8 @@ static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                 vcpu->arch.hcr_el2 |= HCR_FWB;
 
         if (cpus_have_final_cap(ARM64_HAS_EVT) &&
-            !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+            !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
+            kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0) == read_cpuid(CTR_EL0))
                 vcpu->arch.hcr_el2 |= HCR_TID4;
         else
                 vcpu->arch.hcr_el2 |= HCR_TID2;
@@ -315,6 +316,9 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
         unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
         DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
 
+        /* CTR_EL0 is always under host control, even for protected VMs. */
+        hyp_vm->kvm.arch.ctr_el0 = host_kvm->arch.ctr_el0;
+
         if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
                 set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
 
@@ -325,6 +329,10 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
                 bitmap_copy(kvm->arch.vcpu_features,
                             host_kvm->arch.vcpu_features,
                             KVM_VCPU_MAX_FEATURES);
+
+                if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &host_arch_flags))
+                        hyp_vm->kvm.arch.midr_el1 = host_kvm->arch.midr_el1;
+
                 return;
         }
 

arch/arm64/kvm/hyp/nvhe/sysreg-sr.c

Lines changed: 3 additions & 1 deletion
@@ -28,7 +28,9 @@ void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
 
 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
 {
-        __sysreg_restore_el1_state(ctxt, ctxt_sys_reg(ctxt, MPIDR_EL1));
+        u64 midr = ctxt_midr_el1(ctxt);
+
+        __sysreg_restore_el1_state(ctxt, midr, ctxt_sys_reg(ctxt, MPIDR_EL1));
         __sysreg_restore_common_state(ctxt);
         __sysreg_restore_user_state(ctxt);
         __sysreg_restore_el2_return_state(ctxt);

arch/arm64/kvm/hyp/vhe/sysreg-sr.c

Lines changed: 10 additions & 18 deletions
@@ -87,11 +87,12 @@ static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
         write_sysreg(__vcpu_sys_reg(vcpu, PAR_EL1), par_el1);
         write_sysreg(__vcpu_sys_reg(vcpu, TPIDR_EL1), tpidr_el1);
 
-        write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1), vmpidr_el2);
-        write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2), SYS_MAIR);
-        write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2), SYS_VBAR);
-        write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
-        write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2), SYS_AMAIR);
+        write_sysreg(ctxt_midr_el1(&vcpu->arch.ctxt),          vpidr_el2);
+        write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1),          vmpidr_el2);
+        write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2),       SYS_MAIR);
+        write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2),       SYS_VBAR);
+        write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
+        write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2),      SYS_AMAIR);
 
         if (vcpu_el2_e2h_is_set(vcpu)) {
                 /*
@@ -191,7 +192,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
         struct kvm_cpu_context *host_ctxt;
-        u64 mpidr;
+        u64 midr, mpidr;
 
         host_ctxt = host_data_ptr(host_ctxt);
         __sysreg_save_user_state(host_ctxt);
@@ -220,23 +221,18 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
                 __sysreg_restore_vel2_state(vcpu);
         } else {
                 if (vcpu_has_nv(vcpu)) {
-                        /*
-                         * Use the guest hypervisor's VPIDR_EL2 when in a
-                         * nested state. The hardware value of MIDR_EL1 gets
-                         * restored on put.
-                         */
-                        write_sysreg(ctxt_sys_reg(guest_ctxt, VPIDR_EL2), vpidr_el2);
-
                         /*
                          * As we're restoring a nested guest, set the value
                          * provided by the guest hypervisor.
                          */
+                        midr = ctxt_sys_reg(guest_ctxt, VPIDR_EL2);
                         mpidr = ctxt_sys_reg(guest_ctxt, VMPIDR_EL2);
                 } else {
+                        midr = ctxt_midr_el1(guest_ctxt);
                         mpidr = ctxt_sys_reg(guest_ctxt, MPIDR_EL1);
                 }
 
-                __sysreg_restore_el1_state(guest_ctxt, mpidr);
+                __sysreg_restore_el1_state(guest_ctxt, midr, mpidr);
         }
 
         vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
@@ -271,9 +267,5 @@ void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
         /* Restore host user state */
         __sysreg_restore_user_state(host_ctxt);
 
-        /* If leaving a nesting guest, restore MIDR_EL1 default view */
-        if (vcpu_has_nv(vcpu))
-                write_sysreg(read_cpuid_id(), vpidr_el2);
-
         vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
 }
