
Commit 0bf9601

Merge tag 'kvmarm-fixes-6.3-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 6.3, part #3

- Ensure the guest PMU context is restored before the first KVM_RUN, fixing an issue where EL0 event counting is broken after vCPU save/restore

- Actually initialize ID_AA64PFR0_EL1.{CSV2,CSV3} based on the sanitized, system-wide values for protected VMs
2 parents: fb5015b + e816252

File tree: 5 files changed (+30, -10 lines)


arch/arm64/kvm/arm.c

Lines changed: 25 additions & 1 deletion
@@ -1890,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
 	return ret;
 }
 
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+	/*
+	 * Track whether the system isn't affected by spectre/meltdown in the
+	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+	 * Although this is per-CPU, we make it global for simplicity, e.g., not
+	 * to have to worry about vcpu migration.
+	 *
+	 * Unlike for non-protected VMs, userspace cannot override this for
+	 * protected VMs.
+	 */
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+	return val;
+}
+
 static void kvm_hyp_init_symbols(void)
 {
-	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
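
For readers less familiar with the bitfield idiom above, here is a minimal, self-contained sketch of the same clear-then-set pattern. The field positions (CSV2 at bits [59:56] and CSV3 at bits [63:60] of ID_AA64PFR0_EL1) follow the architecture; everything else (the macros, the sample register value, the stand-in mitigation flags) is invented for illustration and is not the kernel's FIELD_PREP()/ARM64_FEATURE_MASK() code. Clearing both fields before ORing in the new values matters because OR alone cannot lower bits left over from the sanitised value.

/* Illustrative only: local stand-ins for the kernel's bitfield helpers. */
#include <inttypes.h>
#include <stdio.h>

#define ID_FIELD_MASK(shift)    (UINT64_C(0xf) << (shift))             /* 4-bit ID field  */
#define ID_FIELD_PREP(shift, v) (((uint64_t)(v) & UINT64_C(0xf)) << (shift))

#define CSV2_SHIFT 56   /* ID_AA64PFR0_EL1.CSV2, bits [59:56] */
#define CSV3_SHIFT 60   /* ID_AA64PFR0_EL1.CSV3, bits [63:60] */

int main(void)
{
	/* Pretend sanitised register value and pretend mitigation state. */
	uint64_t pfr0 = UINT64_C(0x1100000000000011);
	int spectre_v2_unaffected = 1;  /* stand-in for the arm64_get_spectre_v2_state() check */
	int meltdown_unaffected   = 0;  /* stand-in for the arm64_get_meltdown_state() check   */

	/* Clear both fields first, then advertise 1 only when unaffected. */
	pfr0 &= ~(ID_FIELD_MASK(CSV2_SHIFT) | ID_FIELD_MASK(CSV3_SHIFT));
	pfr0 |= ID_FIELD_PREP(CSV2_SHIFT, spectre_v2_unaffected);
	pfr0 |= ID_FIELD_PREP(CSV3_SHIFT, meltdown_unaffected);

	printf("pfr0 = %#018" PRIx64 "\n", pfr0);
	return 0;
}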

arch/arm64/kvm/hyp/include/nvhe/fixed_config.h

Lines changed: 4 additions & 1 deletion
@@ -33,11 +33,14 @@
  * Allow for protected VMs:
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
 	)
 
 /*

arch/arm64/kvm/hyp/nvhe/sys_regs.c

Lines changed: 0 additions & 7 deletions
@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
 	u64 set_mask = 0;
 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
 	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
 		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
-	/* Spectre and Meltdown mitigation in KVM */
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-			       (u64)kvm->arch.pfr0_csv2);
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-			       (u64)kvm->arch.pfr0_csv3);
-
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
 
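
As a rough illustration of why the CSV2/CSV3 bits had to move into PVM_ID_AA64PFR0_ALLOW: get_pvm_id_aa64pfr0() only passes through bits covered by the allow mask and then ORs in the restricted fields, so a field absent from the mask reads as zero to the protected VM no matter what the host value is. A minimal sketch of that filtering follows; the masks and register values are made up for the example and are not the kernel's PVM_ID_AA64PFR0_* definitions.

/* Sketch of the "(sys_val & allow_mask) | set_mask" filtering used by
 * get_pvm_id_aa64pfr0(); all constants below are invented for illustration. */
#include <inttypes.h>
#include <stdio.h>

static uint64_t filter_id_reg(uint64_t sys_val, uint64_t allow_mask, uint64_t set_mask)
{
	/* Bits outside allow_mask read as zero for the protected VM;
	 * set_mask then pins the values the hypervisor wants to advertise. */
	return (sys_val & allow_mask) | set_mask;
}

int main(void)
{
	uint64_t sys_val    = UINT64_C(0x00000000f0000f21); /* pretend host ID value    */
	uint64_t allow_mask = UINT64_C(0x00000000000000ff); /* pretend allowed fields   */
	uint64_t set_mask   = UINT64_C(0x0000000000000100); /* pretend pinned field     */

	printf("guest view = %#018" PRIx64 "\n",
	       filter_id_reg(sys_val, allow_mask, set_mask));
	return 0;
}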

arch/arm64/kvm/pmu-emul.c

Lines changed: 1 addition & 0 deletions
@@ -558,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)

arch/arm64/kvm/sys_regs.c

Lines changed: 0 additions & 1 deletion
@@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		if (!kvm_supports_32bit_el0())
 			val |= ARMV8_PMU_PMCR_LC;
 		kvm_pmu_handle_pmcr(vcpu, val);
-		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
