Skip to content

Commit 089d1c3

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini: "ARM: - Fix the pKVM stage-1 walker erroneously using the stage-2 accessor - Correctly convert vcpu->kvm to a hyp pointer when generating an exception in a nVHE+MTE configuration - Check that KVM_CAP_DIRTY_LOG_* are valid before enabling them - Fix SMPRI_EL1/TPIDR2_EL0 trapping on VHE - Document the boot requirements for FGT when entering the kernel at EL1 x86: - Use SRCU to protect zap in __kvm_set_or_clear_apicv_inhibit() - Make argument order consistent for kvcalloc() - Userspace API fixes for DEBUGCTL and LBRs" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: KVM: x86: Fix a typo about the usage of kvcalloc() KVM: x86: Use SRCU to protect zap in __kvm_set_or_clear_apicv_inhibit() KVM: VMX: Ignore guest CPUID for host userspace writes to DEBUGCTL KVM: VMX: Fold vmx_supported_debugctl() into vcpu_supported_debugctl() KVM: VMX: Advertise PMU LBRs if and only if perf supports LBRs arm64: booting: Document our requirements for fine grained traps with SME KVM: arm64: Fix SMPRI_EL1/TPIDR2_EL0 trapping on VHE KVM: Check KVM_CAP_DIRTY_LOG_{RING, RING_ACQ_REL} prior to enabling them KVM: arm64: Fix bad dereference on MTE-enabled systems KVM: arm64: Use correct accessor to parse stage-1 PTEs
2 parents 6e8c78d + f4298ca commit 089d1c3

File tree

11 files changed

+52
-60
lines changed

11 files changed

+52
-60
lines changed

Documentation/arm64/booting.rst

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -340,6 +340,14 @@ Before jumping into the kernel, the following conditions must be met:
340340
- SMCR_EL2.LEN must be initialised to the same value for all CPUs the
341341
kernel will execute on.
342342

343+
- HWFGRTR_EL2.nTPIDR2_EL0 (bit 55) must be initialised to 0b01.
344+
345+
- HWFGWTR_EL2.nTPIDR2_EL0 (bit 55) must be initialised to 0b01.
346+
347+
- HWFGRTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01.
348+
349+
- HWFGWTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01.
350+
343351
For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64)
344352

345353
- If EL3 is present:

arch/arm64/kvm/hyp/exception.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
#include <hyp/adjust_pc.h>
1414
#include <linux/kvm_host.h>
1515
#include <asm/kvm_emulate.h>
16+
#include <asm/kvm_mmu.h>
1617

1718
#if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__)
1819
#error Hypervisor code only!
@@ -115,7 +116,7 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
115116
new |= (old & PSR_C_BIT);
116117
new |= (old & PSR_V_BIT);
117118

118-
if (kvm_has_mte(vcpu->kvm))
119+
if (kvm_has_mte(kern_hyp_va(vcpu->kvm)))
119120
new |= PSR_TCO_BIT;
120121

121122
new |= (old & PSR_DIT_BIT);

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,17 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
8787

8888
vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
8989
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
90+
91+
if (cpus_have_final_cap(ARM64_SME)) {
92+
sysreg_clear_set_s(SYS_HFGRTR_EL2,
93+
HFGxTR_EL2_nSMPRI_EL1_MASK |
94+
HFGxTR_EL2_nTPIDR2_EL0_MASK,
95+
0);
96+
sysreg_clear_set_s(SYS_HFGWTR_EL2,
97+
HFGxTR_EL2_nSMPRI_EL1_MASK |
98+
HFGxTR_EL2_nTPIDR2_EL0_MASK,
99+
0);
100+
}
90101
}
91102

92103
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -96,6 +107,15 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
96107
write_sysreg(0, hstr_el2);
97108
if (kvm_arm_support_pmu_v3())
98109
write_sysreg(0, pmuserenr_el0);
110+
111+
if (cpus_have_final_cap(ARM64_SME)) {
112+
sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,
113+
HFGxTR_EL2_nSMPRI_EL1_MASK |
114+
HFGxTR_EL2_nTPIDR2_EL0_MASK);
115+
sysreg_clear_set_s(SYS_HFGWTR_EL2, 0,
116+
HFGxTR_EL2_nSMPRI_EL1_MASK |
117+
HFGxTR_EL2_nTPIDR2_EL0_MASK);
118+
}
99119
}
100120

101121
static inline void ___activate_traps(struct kvm_vcpu *vcpu)

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -516,7 +516,7 @@ static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
516516
if (!kvm_pte_valid(pte))
517517
return PKVM_NOPAGE;
518518

519-
return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
519+
return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
520520
}
521521

522522
static int __hyp_check_page_state_range(u64 addr, u64 size,

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 0 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -55,18 +55,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
5555
write_sysreg(val, cptr_el2);
5656
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
5757

58-
if (cpus_have_final_cap(ARM64_SME)) {
59-
val = read_sysreg_s(SYS_HFGRTR_EL2);
60-
val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
61-
HFGxTR_EL2_nSMPRI_EL1_MASK);
62-
write_sysreg_s(val, SYS_HFGRTR_EL2);
63-
64-
val = read_sysreg_s(SYS_HFGWTR_EL2);
65-
val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
66-
HFGxTR_EL2_nSMPRI_EL1_MASK);
67-
write_sysreg_s(val, SYS_HFGWTR_EL2);
68-
}
69-
7058
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
7159
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
7260

@@ -110,20 +98,6 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
11098

11199
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
112100

113-
if (cpus_have_final_cap(ARM64_SME)) {
114-
u64 val;
115-
116-
val = read_sysreg_s(SYS_HFGRTR_EL2);
117-
val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
118-
HFGxTR_EL2_nSMPRI_EL1_MASK;
119-
write_sysreg_s(val, SYS_HFGRTR_EL2);
120-
121-
val = read_sysreg_s(SYS_HFGWTR_EL2);
122-
val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
123-
HFGxTR_EL2_nSMPRI_EL1_MASK;
124-
write_sysreg_s(val, SYS_HFGWTR_EL2);
125-
}
126-
127101
cptr = CPTR_EL2_DEFAULT;
128102
if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
129103
cptr |= CPTR_EL2_TZ;

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -63,10 +63,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
6363
__activate_traps_fpsimd32(vcpu);
6464
}
6565

66-
if (cpus_have_final_cap(ARM64_SME))
67-
write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
68-
sctlr_el2);
69-
7066
write_sysreg(val, cpacr_el1);
7167

7268
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
@@ -88,10 +84,6 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
8884
*/
8985
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
9086

91-
if (cpus_have_final_cap(ARM64_SME))
92-
write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
93-
sctlr_el2);
94-
9587
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
9688

9789
if (!arm64_kernel_unmapped_at_el0())

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1338,7 +1338,7 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
13381338
if (sanity_check_entries(entries, cpuid->nent, type))
13391339
return -EINVAL;
13401340

1341-
array.entries = kvcalloc(sizeof(struct kvm_cpuid_entry2), cpuid->nent, GFP_KERNEL);
1341+
array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
13421342
if (!array.entries)
13431343
return -ENOMEM;
13441344

arch/x86/kvm/vmx/capabilities.h

Lines changed: 3 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,6 @@ extern int __read_mostly pt_mode;
2424
#define PMU_CAP_FW_WRITES (1ULL << 13)
2525
#define PMU_CAP_LBR_FMT 0x3f
2626

27-
#define DEBUGCTLMSR_LBR_MASK (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI)
28-
2927
struct nested_vmx_msrs {
3028
/*
3129
* We only store the "true" versions of the VMX capability MSRs. We
@@ -400,6 +398,7 @@ static inline bool vmx_pebs_supported(void)
400398
static inline u64 vmx_get_perf_capabilities(void)
401399
{
402400
u64 perf_cap = PMU_CAP_FW_WRITES;
401+
struct x86_pmu_lbr lbr;
403402
u64 host_perf_cap = 0;
404403

405404
if (!enable_pmu)
@@ -408,7 +407,8 @@ static inline u64 vmx_get_perf_capabilities(void)
408407
if (boot_cpu_has(X86_FEATURE_PDCM))
409408
rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
410409

411-
perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
410+
if (x86_perf_get_lbr(&lbr) >= 0 && lbr.nr)
411+
perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
412412

413413
if (vmx_pebs_supported()) {
414414
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
@@ -419,19 +419,6 @@ static inline u64 vmx_get_perf_capabilities(void)
419419
return perf_cap;
420420
}
421421

422-
static inline u64 vmx_supported_debugctl(void)
423-
{
424-
u64 debugctl = 0;
425-
426-
if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
427-
debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
428-
429-
if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)
430-
debugctl |= DEBUGCTLMSR_LBR_MASK;
431-
432-
return debugctl;
433-
}
434-
435422
static inline bool cpu_has_notify_vmexit(void)
436423
{
437424
return vmcs_config.cpu_based_2nd_exec_ctrl &

arch/x86/kvm/vmx/vmx.c

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2021,15 +2021,17 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
20212021
return (unsigned long)data;
20222022
}
20232023

2024-
static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
2024+
static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
20252025
{
2026-
u64 debugctl = vmx_supported_debugctl();
2026+
u64 debugctl = 0;
20272027

2028-
if (!intel_pmu_lbr_is_enabled(vcpu))
2029-
debugctl &= ~DEBUGCTLMSR_LBR_MASK;
2028+
if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
2029+
(host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
2030+
debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
20302031

2031-
if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
2032-
debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
2032+
if ((vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) &&
2033+
(host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
2034+
debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
20332035

20342036
return debugctl;
20352037
}
@@ -2103,7 +2105,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
21032105
vmcs_writel(GUEST_SYSENTER_ESP, data);
21042106
break;
21052107
case MSR_IA32_DEBUGCTLMSR: {
2106-
u64 invalid = data & ~vcpu_supported_debugctl(vcpu);
2108+
u64 invalid;
2109+
2110+
invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
21072111
if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
21082112
if (report_ignored_msrs)
21092113
vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n",

arch/x86/kvm/x86.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10404,7 +10404,10 @@ void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
1040410404
kvm->arch.apicv_inhibit_reasons = new;
1040510405
if (new) {
1040610406
unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
10407+
int idx = srcu_read_lock(&kvm->srcu);
10408+
1040710409
kvm_zap_gfn_range(kvm, gfn, gfn+1);
10410+
srcu_read_unlock(&kvm->srcu, idx);
1040810411
}
1040910412
} else {
1041010413
kvm->arch.apicv_inhibit_reasons = new;

0 commit comments

Comments
 (0)