Commit ca19dd4

Merge branch 'kvm-arm64/pkvm-6.15' into kvmarm/next
* kvm-arm64/pkvm-6.15:
  : pKVM updates for 6.15
  :
  : - SecPageTable stats for stage-2 table pages allocated by the protected
  :   hypervisor (Vincent Donnefort)
  :
  : - HCRX_EL2 trap + vCPU initialization fixes for pKVM (Fuad Tabba)
  KVM: arm64: Create each pKVM hyp vcpu after its corresponding host vcpu
  KVM: arm64: Factor out pKVM hyp vcpu creation to separate function
  KVM: arm64: Initialize HCRX_EL2 traps in pKVM
  KVM: arm64: Factor out setting HCRX_EL2 traps into separate function
  KVM: arm64: Count pKVM stage-2 usage in secondary pagetable stats
  KVM: arm64: Distinct pKVM teardown memcache for stage-2
  KVM: arm64: Add flags to kvm_hyp_memcache

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents 4f2774c + 1eab115 commit ca19dd4
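
Of the two themes above, the SecPageTable accounting works by tagging a kvm_hyp_memcache with the new HYP_MEMCACHE_ACCOUNT_STAGE2 flag so that the generic topup/free helpers in arch/arm64/kvm/mmu.c (changed below) charge each page to the secondary page-table stats via kvm_account_pgtable_pages(). A minimal host-side sketch follows; the wrapper function and its call site are assumptions for illustration, only the flag, the pkvm_memcache field and the helpers come from this series.

/*
 * Illustrative only: the flag, topup_hyp_memcache() and the
 * pkvm_memcache field are from this series; this wrapper and its
 * name are assumptions.
 */
static int example_topup_stage2_memcache(struct kvm_vcpu *vcpu,
                                         unsigned long min_pages)
{
        struct kvm_hyp_memcache *mc = &vcpu->arch.pkvm_memcache;

        /* Pages donated through this cache back stage-2 tables at EL2. */
        mc->flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;

        /*
         * topup_hyp_memcache() now hands 'mc' to hyp_mc_alloc_fn(), which
         * calls kvm_account_pgtable_pages(addr, 1) for flagged caches;
         * free_hyp_memcache() undoes the accounting on teardown.
         */
        return topup_hyp_memcache(mc, min_pages);
}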

File tree: 11 files changed (+136 -95 lines)
arch/arm64/include/asm/kvm_emulate.h

Lines changed: 24 additions & 0 deletions
@@ -662,4 +662,28 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
 {
 	return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
 }
+
+static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+		/*
+		 * In general, all HCRX_EL2 bits are gated by a feature.
+		 * The only reason we can set SMPME without checking any
+		 * feature is that its effects are not directly observable
+		 * from the guest.
+		 */
+		vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
+
+		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+			vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+
+		if (kvm_has_tcr2(kvm))
+			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
+
+		if (kvm_has_fpmr(kvm))
+			vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+	}
+}
 #endif /* __ARM64_KVM_EMULATE_H__ */
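
vcpu_set_hcrx() above is the HCRX_EL2 setup factored out so pKVM can reuse it: protected vCPUs compute the value at EL2 (see the nvhe/pkvm.c hunk below), while non-protected vCPUs simply inherit the host's hcrx_el2. A minimal sketch of a non-pKVM caller follows; the surrounding reset helper is an assumption, only vcpu_set_hcrx() itself is from this diff.

/*
 * Minimal usage sketch: vcpu_set_hcrx() is real (above); calling it
 * from a reset helper like this is an assumption for illustration.
 */
static void example_vcpu_reset_traps(struct kvm_vcpu *vcpu)
{
        /* ... existing HCR_EL2 / MDCR_EL2 setup elided ... */

        /* Compute the feature-gated HCRX_EL2 trap bits in one place. */
        vcpu_set_hcrx(vcpu);
}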

arch/arm64/include/asm/kvm_host.h

Lines changed: 6 additions & 0 deletions
@@ -87,6 +87,9 @@ struct kvm_hyp_memcache {
 	phys_addr_t head;
 	unsigned long nr_pages;
 	struct pkvm_mapping *mapping; /* only used from EL1 */
+
+#define HYP_MEMCACHE_ACCOUNT_STAGE2	BIT(1)
+	unsigned long flags;
 };
 
 static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
@@ -247,6 +250,7 @@ typedef unsigned int pkvm_handle_t;
 struct kvm_protected_vm {
 	pkvm_handle_t handle;
 	struct kvm_hyp_memcache teardown_mc;
+	struct kvm_hyp_memcache stage2_teardown_mc;
 	bool enabled;
 };
 
@@ -902,6 +906,8 @@ struct kvm_vcpu_arch {
 #define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(0))
 /* SVE config completed */
 #define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+/* pKVM VCPU setup completed */
+#define VCPU_PKVM_FINALIZED	__vcpu_single_flag(cflags, BIT(2))
 
 /* Exception pending */
 #define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))

arch/arm64/include/asm/kvm_pkvm.h

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@
 int pkvm_init_host_vm(struct kvm *kvm);
 int pkvm_create_hyp_vm(struct kvm *kvm);
 void pkvm_destroy_hyp_vm(struct kvm *kvm);
+int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);
 
 /*
  * This functions as an allow-list of protected VM capabilities.

arch/arm64/kvm/arm.c

Lines changed: 4 additions & 0 deletions
@@ -866,6 +866,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 		ret = pkvm_create_hyp_vm(kvm);
 		if (ret)
 			return ret;
+
+		ret = pkvm_create_hyp_vcpu(vcpu);
+		if (ret)
+			return ret;
 	}
 
 	mutex_lock(&kvm->arch.config_lock);
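
With this change each hyp vCPU is created lazily on the vCPU's first run, after its host counterpart already exists, instead of all at once at VM creation. The EL1-side pkvm_create_hyp_vcpu() lives in arch/arm64/kvm/pkvm.c and is not part of this excerpt; the sketch below only gives its plausible shape under stated assumptions (one donated page, simplified error handling), reusing the VCPU_PKVM_FINALIZED flag and the __pkvm_init_vcpu hypercall that do appear in this series.

/*
 * Rough sketch of the EL1 wrapper's expected shape; the real body is in
 * arch/arm64/kvm/pkvm.c and is not shown here, so allocation size and
 * error paths below are assumptions.
 */
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu)
{
        void *hyp_vcpu;
        int ret;

        if (vcpu_get_flag(vcpu, VCPU_PKVM_FINALIZED))
                return 0;       /* already created for this vCPU */

        /* One donated page for the hyp vCPU state is an assumption. */
        hyp_vcpu = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
        if (!hyp_vcpu)
                return -ENOMEM;

        /*
         * __pkvm_init_vcpu() is the EL2 handler updated in the
         * nvhe/pkvm.c hunk further down.
         */
        ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, vcpu->kvm->arch.pkvm.handle,
                                vcpu, hyp_vcpu);
        if (ret)
                free_page((unsigned long)hyp_vcpu);
        else
                vcpu_set_flag(vcpu, VCPU_PKVM_FINALIZED);

        return ret;
}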

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
 int hyp_pin_shared_mem(void *from, void *to);
 void hyp_unpin_shared_mem(void *from, void *to);
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
+void reclaim_pgtable_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
 int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
 		    struct kvm_hyp_memcache *host_mc);
 

arch/arm64/kvm/hyp/include/nvhe/pkvm.h

Lines changed: 0 additions & 6 deletions
@@ -43,12 +43,6 @@ struct pkvm_hyp_vm {
 	struct hyp_pool pool;
 	hyp_spinlock_t lock;
 
-	/*
-	 * The number of vcpus initialized and ready to run.
-	 * Modifying this is protected by 'vm_table_lock'.
-	 */
-	unsigned int nr_vcpus;
-
 	/* Array of the hyp vCPU structures for this VM. */
 	struct pkvm_hyp_vcpu *vcpus[];
 };

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion
@@ -266,7 +266,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
 	return 0;
 }
 
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
+void reclaim_pgtable_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
 {
 	struct hyp_page *page;
 	void *addr;

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 42 additions & 27 deletions
@@ -167,15 +167,21 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
 
 	pkvm_vcpu_reset_hcr(vcpu);
 
-	if ((!pkvm_hyp_vcpu_is_protected(hyp_vcpu)))
+	if ((!pkvm_hyp_vcpu_is_protected(hyp_vcpu))) {
+		struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+		/* Trust the host for non-protected vcpu features. */
+		vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
 		return 0;
+	}
 
 	ret = pkvm_check_pvm_cpu_features(vcpu);
 	if (ret)
 		return ret;
 
 	pvm_init_traps_hcr(vcpu);
 	pvm_init_traps_mdcr(vcpu);
+	vcpu_set_hcrx(vcpu);
 
 	return 0;
 }
@@ -240,10 +246,12 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
 
 	hyp_spin_lock(&vm_table_lock);
 	hyp_vm = get_vm_by_handle(handle);
-	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
+	if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
 		goto unlock;
 
 	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+	if (!hyp_vcpu)
+		goto unlock;
 
 	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
 	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
@@ -369,8 +377,14 @@ static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
 {
 	int i;
 
-	for (i = 0; i < nr_vcpus; i++)
-		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
+	for (i = 0; i < nr_vcpus; i++) {
+		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vcpus[i];
+
+		if (!hyp_vcpu)
+			continue;
+
+		unpin_host_vcpu(hyp_vcpu->host_vcpu);
+	}
 }
 
 static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
@@ -394,24 +408,18 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 			      struct pkvm_hyp_vm *hyp_vm,
-			      struct kvm_vcpu *host_vcpu,
-			      unsigned int vcpu_idx)
+			      struct kvm_vcpu *host_vcpu)
 {
 	int ret = 0;
 
 	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
 		return -EBUSY;
 
-	if (host_vcpu->vcpu_idx != vcpu_idx) {
-		ret = -EINVAL;
-		goto done;
-	}
-
 	hyp_vcpu->host_vcpu = host_vcpu;
 
 	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
 	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
-	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;
+	hyp_vcpu->vcpu.vcpu_idx = READ_ONCE(host_vcpu->vcpu_idx);
 
 	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
 	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
@@ -649,27 +657,28 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
 		goto unlock;
 	}
 
-	idx = hyp_vm->nr_vcpus;
+	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
+	if (ret)
+		goto unlock;
+
+	idx = hyp_vcpu->vcpu.vcpu_idx;
 	if (idx >= hyp_vm->kvm.created_vcpus) {
 		ret = -EINVAL;
 		goto unlock;
 	}
 
-	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
-	if (ret)
+	if (hyp_vm->vcpus[idx]) {
+		ret = -EINVAL;
 		goto unlock;
+	}
 
 	hyp_vm->vcpus[idx] = hyp_vcpu;
-	hyp_vm->nr_vcpus++;
 unlock:
 	hyp_spin_unlock(&vm_table_lock);
 
-	if (ret) {
+	if (ret)
 		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
-		return ret;
-	}
-
-	return 0;
+	return ret;
 }
 
 static void
@@ -686,7 +695,7 @@ teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
 
 int __pkvm_teardown_vm(pkvm_handle_t handle)
 {
-	struct kvm_hyp_memcache *mc;
+	struct kvm_hyp_memcache *mc, *stage2_mc;
 	struct pkvm_hyp_vm *hyp_vm;
 	struct kvm *host_kvm;
 	unsigned int idx;
@@ -714,18 +723,24 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
 
 	/* Reclaim guest pages (including page-table pages) */
 	mc = &host_kvm->arch.pkvm.teardown_mc;
-	reclaim_guest_pages(hyp_vm, mc);
-	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
+	stage2_mc = &host_kvm->arch.pkvm.stage2_teardown_mc;
+	reclaim_pgtable_pages(hyp_vm, stage2_mc);
+	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->kvm.created_vcpus);
 
 	/* Push the metadata pages to the teardown memcache */
-	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
+	for (idx = 0; idx < hyp_vm->kvm.created_vcpus; ++idx) {
 		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
-		struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
+		struct kvm_hyp_memcache *vcpu_mc;
+
+		if (!hyp_vcpu)
+			continue;
+
+		vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
 
 		while (vcpu_mc->nr_pages) {
 			void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
 
-			push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+			push_hyp_memcache(stage2_mc, addr, hyp_virt_to_phys);
 			unmap_donated_memory_noclear(addr, PAGE_SIZE);
 		}
 
arch/arm64/kvm/mmu.c

Lines changed: 17 additions & 5 deletions
@@ -1086,14 +1086,26 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 	}
 }
 
-static void hyp_mc_free_fn(void *addr, void *unused)
+static void hyp_mc_free_fn(void *addr, void *mc)
 {
+	struct kvm_hyp_memcache *memcache = mc;
+
+	if (memcache->flags & HYP_MEMCACHE_ACCOUNT_STAGE2)
+		kvm_account_pgtable_pages(addr, -1);
+
 	free_page((unsigned long)addr);
 }
 
-static void *hyp_mc_alloc_fn(void *unused)
+static void *hyp_mc_alloc_fn(void *mc)
 {
-	return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+	struct kvm_hyp_memcache *memcache = mc;
+	void *addr;
+
+	addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+	if (addr && memcache->flags & HYP_MEMCACHE_ACCOUNT_STAGE2)
+		kvm_account_pgtable_pages(addr, 1);
+
+	return addr;
 }
 
 void free_hyp_memcache(struct kvm_hyp_memcache *mc)
@@ -1102,7 +1114,7 @@ void free_hyp_memcache(struct kvm_hyp_memcache *mc)
 		return;
 
 	kfree(mc->mapping);
-	__free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, NULL);
+	__free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, mc);
 }
 
 int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
@@ -1117,7 +1129,7 @@ int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
 	}
 
 	return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
-				    kvm_host_pa, NULL);
+				    kvm_host_pa, mc);
 }
 
 /**
