Skip to content

Commit fbf3372

Browse files
committed
Merge branch kvm-arm64/misc into kvmarm/next
* kvm-arm64/misc:
  Miscellaneous updates:

  - Drop useless check against vgic state in ICC_CTLR_EL1.SEIS read emulation

  - Fix trap configuration for pKVM

  - Close the door on initialization bugs surrounding userspace irqchip static key by removing it.

  KVM: selftests: Don't bother deleting memslots in KVM when freeing VMs
  KVM: arm64: Get rid of userspace_irqchip_in_use
  KVM: arm64: Initialize trap register values in hyp in pKVM
  KVM: arm64: Initialize the hypervisor's VM state at EL2
  KVM: arm64: Refactor kvm_vcpu_enable_ptrauth() for hyp use
  KVM: arm64: Move pkvm_vcpu_init_traps() to init_pkvm_hyp_vcpu()
  KVM: arm64: Don't map 'kvm_vgic_global_state' at EL2 with pKVM
  KVM: arm64: Just advertise SEIS as 0 when emulating ICC_CTLR_EL1

  Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents 24bb181 + 5afe18d commit fbf3372

File tree

12 files changed

+132
-69
lines changed

12 files changed

+132
-69
lines changed

arch/arm64/include/asm/kvm_asm.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,6 @@ enum __kvm_host_smccc_func {
7676
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
7777
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
7878
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
79-
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
8079
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
8180
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
8281
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -693,4 +693,8 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
693693
return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
694694
}
695695

696+
static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
697+
{
698+
vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
699+
}
696700
#endif /* __ARM64_KVM_EMULATE_H__ */

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,6 @@ enum kvm_mode kvm_get_mode(void);
7474
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
7575
#endif
7676

77-
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
78-
7977
extern unsigned int __ro_after_init kvm_sve_max_vl;
8078
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
8179
int __init kvm_arm_init_sve(void);

arch/arm64/kvm/arch_timer.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -206,8 +206,7 @@ void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
206206

207207
static inline bool userspace_irqchip(struct kvm *kvm)
208208
{
209-
return static_branch_unlikely(&userspace_irqchip_in_use) &&
210-
unlikely(!irqchip_in_kernel(kvm));
209+
return unlikely(!irqchip_in_kernel(kvm));
211210
}
212211

213212
static void soft_timer_start(struct hrtimer *hrt, u64 ns)

arch/arm64/kvm/arm.c

Lines changed: 3 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,6 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
6969
static bool vgic_present, kvm_arm_initialised;
7070

7171
static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
72-
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
7372

7473
bool is_kvm_arm_initialised(void)
7574
{
@@ -503,9 +502,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
503502

504503
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
505504
{
506-
if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
507-
static_branch_dec(&userspace_irqchip_in_use);
508-
509505
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
510506
kvm_timer_vcpu_terminate(vcpu);
511507
kvm_pmu_vcpu_destroy(vcpu);
@@ -848,22 +844,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
848844
return ret;
849845
}
850846

851-
if (!irqchip_in_kernel(kvm)) {
852-
/*
853-
* Tell the rest of the code that there are userspace irqchip
854-
* VMs in the wild.
855-
*/
856-
static_branch_inc(&userspace_irqchip_in_use);
857-
}
858-
859-
/*
860-
* Initialize traps for protected VMs.
861-
* NOTE: Move to run in EL2 directly, rather than via a hypercall, once
862-
* the code is in place for first run initialization at EL2.
863-
*/
864-
if (kvm_vm_is_protected(kvm))
865-
kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
866-
867847
mutex_lock(&kvm->arch.config_lock);
868848
set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
869849
mutex_unlock(&kvm->arch.config_lock);
@@ -1077,7 +1057,7 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
10771057
* state gets updated in kvm_timer_update_run and
10781058
* kvm_pmu_update_run below).
10791059
*/
1080-
if (static_branch_unlikely(&userspace_irqchip_in_use)) {
1060+
if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
10811061
if (kvm_timer_should_notify_user(vcpu) ||
10821062
kvm_pmu_should_notify_user(vcpu)) {
10831063
*ret = -EINTR;
@@ -1199,7 +1179,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
11991179
vcpu->mode = OUTSIDE_GUEST_MODE;
12001180
isb(); /* Ensure work in x_flush_hwstate is committed */
12011181
kvm_pmu_sync_hwstate(vcpu);
1202-
if (static_branch_unlikely(&userspace_irqchip_in_use))
1182+
if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
12031183
kvm_timer_sync_user(vcpu);
12041184
kvm_vgic_sync_hwstate(vcpu);
12051185
local_irq_enable();
@@ -1245,7 +1225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
12451225
* we don't want vtimer interrupts to race with syncing the
12461226
* timer virtual interrupt state.
12471227
*/
1248-
if (static_branch_unlikely(&userspace_irqchip_in_use))
1228+
if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
12491229
kvm_timer_sync_user(vcpu);
12501230

12511231
kvm_arch_vcpu_ctxsync_fp(vcpu);

arch/arm64/kvm/hyp/include/nvhe/trap_handler.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,4 @@
1515
#define DECLARE_REG(type, name, ctxt, reg) \
1616
type name = (type)cpu_reg(ctxt, (reg))
1717

18-
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu);
19-
2018
#endif /* __ARM64_KVM_NVHE_TRAP_HANDLER_H__ */

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -105,8 +105,10 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
105105

106106
hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
107107

108-
hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
109108
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
109+
hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
110+
hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
111+
(HCR_TWI | HCR_TWE);
110112

111113
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
112114

@@ -349,13 +351,6 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
349351
cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
350352
}
351353

352-
static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
353-
{
354-
DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
355-
356-
__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
357-
}
358-
359354
static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
360355
{
361356
DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
@@ -411,7 +406,6 @@ static const hcall_t host_hcall[] = {
411406
HANDLE_FUNC(__kvm_timer_set_cntvoff),
412407
HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
413408
HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
414-
HANDLE_FUNC(__pkvm_vcpu_init_traps),
415409
HANDLE_FUNC(__pkvm_init_vm),
416410
HANDLE_FUNC(__pkvm_init_vcpu),
417411
HANDLE_FUNC(__pkvm_teardown_vm),

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 115 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@
66

77
#include <linux/kvm_host.h>
88
#include <linux/mm.h>
9+
10+
#include <asm/kvm_emulate.h>
11+
912
#include <nvhe/fixed_config.h>
1013
#include <nvhe/mem_protect.h>
1114
#include <nvhe/memory.h>
@@ -201,11 +204,46 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
201204
}
202205
}
203206

207+
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
208+
{
209+
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
210+
211+
if (has_hvhe())
212+
vcpu->arch.hcr_el2 |= HCR_E2H;
213+
214+
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
215+
/* route synchronous external abort exceptions to EL2 */
216+
vcpu->arch.hcr_el2 |= HCR_TEA;
217+
/* trap error record accesses */
218+
vcpu->arch.hcr_el2 |= HCR_TERR;
219+
}
220+
221+
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
222+
vcpu->arch.hcr_el2 |= HCR_FWB;
223+
224+
if (cpus_have_final_cap(ARM64_HAS_EVT) &&
225+
!cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
226+
vcpu->arch.hcr_el2 |= HCR_TID4;
227+
else
228+
vcpu->arch.hcr_el2 |= HCR_TID2;
229+
230+
if (vcpu_has_ptrauth(vcpu))
231+
vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
232+
}
233+
204234
/*
205235
* Initialize trap register values in protected mode.
206236
*/
207-
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
237+
static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
208238
{
239+
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
240+
vcpu->arch.mdcr_el2 = 0;
241+
242+
pkvm_vcpu_reset_hcr(vcpu);
243+
244+
if ((!vcpu_is_protected(vcpu)))
245+
return;
246+
209247
pvm_init_trap_regs(vcpu);
210248
pvm_init_traps_aa64pfr0(vcpu);
211249
pvm_init_traps_aa64pfr1(vcpu);
@@ -289,6 +327,65 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
289327
hyp_spin_unlock(&vm_table_lock);
290328
}
291329

330+
static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
331+
{
332+
struct kvm *kvm = &hyp_vm->kvm;
333+
DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
334+
335+
/* No restrictions for non-protected VMs. */
336+
if (!kvm_vm_is_protected(kvm)) {
337+
bitmap_copy(kvm->arch.vcpu_features,
338+
host_kvm->arch.vcpu_features,
339+
KVM_VCPU_MAX_FEATURES);
340+
return;
341+
}
342+
343+
bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);
344+
345+
/*
346+
* For protected VMs, always allow:
347+
* - CPU starting in poweroff state
348+
* - PSCI v0.2
349+
*/
350+
set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features);
351+
set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);
352+
353+
/*
354+
* Check if remaining features are allowed:
355+
* - Performance Monitoring
356+
* - Scalable Vectors
357+
* - Pointer Authentication
358+
*/
359+
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW))
360+
set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);
361+
362+
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
363+
set_bit(KVM_ARM_VCPU_SVE, allowed_features);
364+
365+
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) &&
366+
FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED))
367+
set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);
368+
369+
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
370+
FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW))
371+
set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);
372+
373+
bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
374+
allowed_features, KVM_VCPU_MAX_FEATURES);
375+
}
376+
377+
static void pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu)
378+
{
379+
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
380+
381+
if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
382+
vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) {
383+
kvm_vcpu_enable_ptrauth(vcpu);
384+
} else {
385+
vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_PTRAUTH);
386+
}
387+
}
388+
292389
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
293390
{
294391
if (host_vcpu)
@@ -310,6 +407,18 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
310407
hyp_vm->host_kvm = host_kvm;
311408
hyp_vm->kvm.created_vcpus = nr_vcpus;
312409
hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
410+
hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
411+
pkvm_init_features_from_host(hyp_vm, host_kvm);
412+
}
413+
414+
static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
415+
{
416+
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
417+
418+
if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
419+
vcpu_clear_flag(vcpu, GUEST_HAS_SVE);
420+
vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
421+
}
313422
}
314423

315424
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -335,6 +444,11 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
335444

336445
hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
337446
hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
447+
hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
448+
449+
pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
450+
pkvm_vcpu_init_ptrauth(hyp_vcpu);
451+
pkvm_vcpu_init_traps(&hyp_vcpu->vcpu);
338452
done:
339453
if (ret)
340454
unpin_host_vcpu(host_vcpu);

arch/arm64/kvm/hyp/nvhe/setup.c

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
9595
{
9696
void *start, *end, *virt = hyp_phys_to_virt(phys);
9797
unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
98-
enum kvm_pgtable_prot prot;
9998
int ret, i;
10099

101100
/* Recreate the hyp page-table using the early page allocator */
@@ -148,22 +147,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
148147
}
149148

150149
pkvm_create_host_sve_mappings();
151-
152-
/*
153-
* Map the host sections RO in the hypervisor, but transfer the
154-
* ownership from the host to the hypervisor itself to make sure they
155-
* can't be donated or shared with another entity.
156-
*
157-
* The ownership transition requires matching changes in the host
158-
* stage-2. This will be done later (see finalize_host_mappings()) once
159-
* the hyp_vmemmap is addressable.
160-
*/
161-
prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
162-
ret = pkvm_create_mappings(&kvm_vgic_global_state,
163-
&kvm_vgic_global_state + 1, prot);
164-
if (ret)
165-
return ret;
166-
167150
return 0;
168151
}
169152

arch/arm64/kvm/hyp/vgic-v3-sr.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1012,9 +1012,6 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
10121012
val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
10131013
/* IDbits */
10141014
val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
1015-
/* SEIS */
1016-
if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
1017-
val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
10181015
/* A3V */
10191016
val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
10201017
/* EOImode */

0 commit comments

Comments
 (0)