
Commit e0fb12c

Merge tag 'kvmarm-6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 updates for Linux 6.6

- Add support for TLB range invalidation of Stage-2 page tables, avoiding
  unnecessary invalidations. Systems that do not implement range
  invalidation still rely on a full invalidation when dealing with large
  ranges.

- Add infrastructure for forwarding traps taken from a L2 guest to the L1
  guest, with L0 acting as the dispatcher, another baby step towards the
  full nested support.

- Simplify the way we deal with the (long deprecated) 'CPU target',
  resulting in a much needed cleanup.

- Fix another set of PMU bugs, both on the guest and host sides, as we
  seem to never have any shortage of those...

- Relax the alignment requirements of EL2 VA allocations for non-stack
  allocations, as we were otherwise wasting a lot of that precious VA
  space.

- The usual set of non-functional cleanups, although I note the lack of
  spelling fixes...
2 parents: 2dde18c + 1f66f12


46 files changed (+2993, -328 lines)

arch/arm/include/asm/arm_pmuv3.h

Lines changed: 2 additions & 0 deletions
@@ -227,6 +227,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
 	return false;
 }
 
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
 /* PMU Version in DFR Register */
 #define ARMV8_PMU_DFR_VER_NI	0
 #define ARMV8_PMU_DFR_VER_V3P4	0x5
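
The new kvm_vcpu_pmu_resync_el0() stub keeps shared PMU driver code building on 32-bit arm, where KVM has nothing to resync; the real hook is presumably provided on the arm64 side of this series. A minimal, purely illustrative caller (the function name below is an assumption, not a call site from the merge):

    /* Sketch: shared perf/PMU code can call the hook unconditionally;
     * on 32-bit arm it compiles to nothing.
     */
    static void example_counters_reprogrammed(void)
    {
            kvm_vcpu_pmu_resync_el0();
    }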

arch/arm64/include/asm/kvm_arm.h

Lines changed: 50 additions & 1 deletion
@@ -18,10 +18,19 @@
 #define HCR_DCT		(UL(1) << 57)
 #define HCR_ATA_SHIFT	56
 #define HCR_ATA		(UL(1) << HCR_ATA_SHIFT)
+#define HCR_TTLBOS	(UL(1) << 55)
+#define HCR_TTLBIS	(UL(1) << 54)
+#define HCR_ENSCXT	(UL(1) << 53)
+#define HCR_TOCU	(UL(1) << 52)
 #define HCR_AMVOFFEN	(UL(1) << 51)
+#define HCR_TICAB	(UL(1) << 50)
 #define HCR_TID4	(UL(1) << 49)
 #define HCR_FIEN	(UL(1) << 47)
 #define HCR_FWB		(UL(1) << 46)
+#define HCR_NV2		(UL(1) << 45)
+#define HCR_AT		(UL(1) << 44)
+#define HCR_NV1		(UL(1) << 43)
+#define HCR_NV		(UL(1) << 42)
 #define HCR_API		(UL(1) << 41)
 #define HCR_APK		(UL(1) << 40)
 #define HCR_TEA		(UL(1) << 37)
@@ -89,7 +98,6 @@
 			 HCR_BSU_IS | HCR_FB | HCR_TACR | \
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
-#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
@@ -324,6 +332,47 @@
 			 BIT(18) | \
 			 GENMASK(16, 15))
 
+/*
+ * FGT register definitions
+ *
+ * RES0 and polarity masks as of DDI0487J.a, to be updated as needed.
+ * We're not using the generated masks as they are usually ahead of
+ * the published ARM ARM, which we use as a reference.
+ *
+ * Once we get to a point where the two describe the same thing, we'll
+ * merge the definitions. One day.
+ */
+#define __HFGRTR_EL2_RES0	(GENMASK(63, 56) | GENMASK(53, 51))
+#define __HFGRTR_EL2_MASK	GENMASK(49, 0)
+#define __HFGRTR_EL2_nMASK	(GENMASK(55, 54) | BIT(50))
+
+#define __HFGWTR_EL2_RES0	(GENMASK(63, 56) | GENMASK(53, 51) | \
+				 BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
+				 GENMASK(26, 25) | BIT(21) | BIT(18) | \
+				 GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
+#define __HFGWTR_EL2_MASK	GENMASK(49, 0)
+#define __HFGWTR_EL2_nMASK	(GENMASK(55, 54) | BIT(50))
+
+#define __HFGITR_EL2_RES0	GENMASK(63, 57)
+#define __HFGITR_EL2_MASK	GENMASK(54, 0)
+#define __HFGITR_EL2_nMASK	GENMASK(56, 55)
+
+#define __HDFGRTR_EL2_RES0	(BIT(49) | BIT(42) | GENMASK(39, 38) | \
+				 GENMASK(21, 20) | BIT(8))
+#define __HDFGRTR_EL2_MASK	~__HDFGRTR_EL2_nMASK
+#define __HDFGRTR_EL2_nMASK	GENMASK(62, 59)
+
+#define __HDFGWTR_EL2_RES0	(BIT(63) | GENMASK(59, 58) | BIT(51) | BIT(47) | \
+				 BIT(43) | GENMASK(40, 38) | BIT(34) | BIT(30) | \
+				 BIT(22) | BIT(9) | BIT(6))
+#define __HDFGWTR_EL2_MASK	~__HDFGWTR_EL2_nMASK
+#define __HDFGWTR_EL2_nMASK	GENMASK(62, 60)
+
+/* Similar definitions for HCRX_EL2 */
+#define __HCRX_EL2_RES0		(GENMASK(63, 16) | GENMASK(13, 12))
+#define __HCRX_EL2_MASK		(0)
+#define __HCRX_EL2_nMASK	(GENMASK(15, 14) | GENMASK(4, 0))
+
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK	(~UL(0xf))
 /*
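
A hedged illustration of how RES0/MASK/nMASK triples like the ones above are typically consumed (the helper is invented for this example, not taken from the merge): positive-polarity trap bits live in the MASK, negative-polarity bits in the nMASK, and RES0 bits must never make it into the programmed value.

    /* Sketch only: sanitise a guest-written HFGRTR_EL2 value. */
    static u64 example_sanitise_hfgrtr_el2(u64 guest_val)
    {
            u64 val = guest_val & ~__HFGRTR_EL2_RES0;	/* strip RES0 bits */

            /* keep only the architecturally defined trap bits */
            val &= __HFGRTR_EL2_MASK | __HFGRTR_EL2_nMASK;

            return val;
    }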

arch/arm64/include/asm/kvm_asm.h

Lines changed: 3 additions & 0 deletions
@@ -70,6 +70,7 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
 	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
@@ -229,6 +230,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
 extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 					  phys_addr_t ipa,
 					  int level);
+extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+					phys_addr_t start, unsigned long pages);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
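
For orientation, a hedged sketch of how the host would typically reach a __kvm_* helper like the one just added: kvm_call_hyp() becomes an HVC carrying the matching __KVM_HOST_SMCCC_FUNC_* ID under nVHE and a direct call under VHE. The wrapper below is hypothetical, not a call site from this merge.

    /* Sketch: forward a stage-2 range invalidation to EL2. */
    static void example_flush_vmid_range(struct kvm_s2_mmu *mmu,
                                         phys_addr_t start, unsigned long pages)
    {
            kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, start, pages);
    }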

arch/arm64/include/asm/kvm_host.h

Lines changed: 15 additions & 9 deletions
@@ -49,6 +49,7 @@
 #define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
 #define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
 #define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
+#define KVM_REQ_RESYNC_PMU_EL0	KVM_ARCH_REQ(7)
 
 #define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)
@@ -380,6 +381,7 @@ enum vcpu_sysreg {
 	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
 	HSTR_EL2,	/* Hypervisor System Trap Register */
 	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
+	HCRX_EL2,	/* Extended Hypervisor Configuration Register */
 	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
 	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
 	TCR_EL2,	/* Translation Control Register (EL2) */
@@ -400,6 +402,11 @@ enum vcpu_sysreg {
 	TPIDR_EL2,	/* EL2 Software Thread ID Register */
 	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
 	SP_EL2,		/* EL2 Stack Pointer */
+	HFGRTR_EL2,
+	HFGWTR_EL2,
+	HFGITR_EL2,
+	HDFGRTR_EL2,
+	HDFGWTR_EL2,
 	CNTHP_CTL_EL2,
 	CNTHP_CVAL_EL2,
 	CNTHV_CTL_EL2,
@@ -567,8 +574,7 @@ struct kvm_vcpu_arch {
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
-	/* Target CPU and feature flags */
-	int target;
+	/* feature flags */
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
@@ -669,6 +675,8 @@ struct kvm_vcpu_arch {
 #define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
 /* PTRAUTH exposed to guest */
 #define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
+/* KVM_ARM_VCPU_INIT completed */
+#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))
 
 /* Exception pending */
 #define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
@@ -899,7 +907,6 @@ struct kvm_vcpu_stat {
 	u64 exits;
 };
 
-void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
@@ -967,8 +974,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
 #endif /* __KVM_NVHE_HYPERVISOR__ */
 
-void force_vm_exit(const cpumask_t *mask);
-
 int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
@@ -983,6 +988,7 @@ int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
 
 int __init kvm_sys_reg_table_init(void);
+int __init populate_nv_trap_config(void);
 
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
@@ -1049,8 +1055,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
 	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
 }
 
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
-
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
@@ -1113,13 +1117,15 @@ int __init kvm_set_ipa_limit(void);
 #define __KVM_HAVE_ARCH_VM_ALLOC
 struct kvm *kvm_arch_alloc_vm(void);
 
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+
 static inline bool kvm_vm_is_protected(struct kvm *kvm)
 {
	return false;
 }
 
-void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
-
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
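
The new KVM_REQ_RESYNC_PMU_EL0 define plugs into KVM's generic vCPU-request machinery. A hedged sketch of that pattern follows; resync_pmu_el0() is a made-up handler name, and neither function is one of the merge's actual call sites.

    /* Producer: flag the vCPU and kick it out of the guest. */
    static void example_request_pmu_resync(struct kvm_vcpu *vcpu)
    {
            kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
            kvm_vcpu_kick(vcpu);
    }

    /* Consumer: run-loop request check before re-entering the guest. */
    static void example_handle_requests(struct kvm_vcpu *vcpu)
    {
            if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
                    resync_pmu_el0(vcpu);	/* hypothetical handler */
    }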

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 0 deletions
@@ -168,6 +168,7 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
 			   void __iomem **haddr);
 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 			     void **haddr);
+int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
 void __init free_hyp_pgds(void);
 
 void stage2_unmap_vm(struct kvm *kvm);
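
create_hyp_stack() ties in with the relaxed EL2 VA alignment mentioned in the merge description: stack mappings keep their stricter layout while other EL2 VA allocations no longer pay for it. A hypothetical caller, assuming stack_page was obtained from the page allocator:

    /* Sketch: map a per-CPU stack page at EL2 and return its hyp VA. */
    static int example_map_hyp_stack(void *stack_page, unsigned long *hyp_va)
    {
            return create_hyp_stack(__pa(stack_page), hyp_va);
    }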

arch/arm64/include/asm/kvm_nested.h

Lines changed: 2 additions & 0 deletions
@@ -11,6 +11,8 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 		test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
 }
 
+extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
+
 struct sys_reg_params;
 struct sys_reg_desc;
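
__check_nv_sr_forward() is the hook behind the "forward traps from L2 to L1" item in the merge description: before emulating a trapped system register access itself, the host asks whether the access should instead be reinjected into the guest hypervisor (vEL2). A hedged sketch, assuming the usual exit-handler convention where returning 1 resumes the guest; emulate_sys_reg_locally() is a hypothetical stand-in.

    static int example_handle_sysreg_trap(struct kvm_vcpu *vcpu)
    {
            /* Trap belongs to the L1 hypervisor: it has been forwarded,
             * nothing left to emulate here.
             */
            if (vcpu_has_nv(vcpu) && __check_nv_sr_forward(vcpu))
                    return 1;

            return emulate_sys_reg_locally(vcpu);	/* hypothetical */
    }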

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -746,4 +746,14 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
746746
* kvm_pgtable_prot format.
747747
*/
748748
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
749+
750+
/**
751+
* kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
752+
*
753+
* @mmu: Stage-2 KVM MMU struct
754+
* @addr: The base Intermediate physical address from which to invalidate
755+
* @size: Size of the range from the base to invalidate
756+
*/
757+
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
758+
phys_addr_t addr, size_t size);
749759
#endif /* __ARM64_KVM_PGTABLE_H__ */
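
A minimal usage sketch of the new helper, tying back to the first item of the merge description: the caller hands over an IPA range, and on systems without range invalidation the implementation is expected to fall back to a full VMID invalidation. The function below is illustrative, not one of the merge's call sites.

    /* Sketch: invalidate the TLB entries covering a just-unmapped range. */
    static void example_invalidate_range(struct kvm_s2_mmu *mmu,
                                         phys_addr_t ipa, size_t len)
    {
            kvm_tlb_flush_vmid_range(mmu, ipa, len);
    }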
