Skip to content

Commit 50a40ff

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/6.6/pmu-fixes into kvmarm-master/next
* kvm-arm64/6.6/pmu-fixes: : . : Another set of PMU fixes, courtesy of Reiji Watanabe. : From the cover letter: : : "This series fixes a couple of PMUver related handling of : vPMU support. : : On systems where the PMUVer is not uniform across all PEs, : KVM currently does not advertise PMUv3 to the guest, : even if userspace successfully runs KVM_ARM_VCPU_INIT with : KVM_ARM_VCPU_PMU_V3." : : Additionally, a fix for an obscure counter oversubscription : issue happening when the host profiles the guest's EL0. : . KVM: arm64: pmu: Guard PMU emulation definitions with CONFIG_KVM KVM: arm64: pmu: Resync EL0 state on counter rotation KVM: arm64: PMU: Don't advertise STALL_SLOT_{FRONTEND,BACKEND} KVM: arm64: PMU: Don't advertise the STALL_SLOT event KVM: arm64: PMU: Avoid inappropriate use of host's PMUVer KVM: arm64: PMU: Disallow vPMU on non-uniform PMUVer Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents d58335d + 9b80b96 commit 50a40ff

File tree

7 files changed

+55
-12
lines changed

7 files changed

+55
-12
lines changed

arch/arm/include/asm/arm_pmuv3.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
227227
return false;
228228
}
229229

230+
static inline void kvm_vcpu_pmu_resync_el0(void) {}
231+
230232
/* PMU Version in DFR Register */
231233
#define ARMV8_PMU_DFR_VER_NI 0
232234
#define ARMV8_PMU_DFR_VER_V3P4 0x5

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@
4949
#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
5050
#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
5151
#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
52+
#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
5253

5354
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
5455
KVM_DIRTY_LOG_INITIALLY_SET)

arch/arm64/kvm/arm.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -804,6 +804,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
804804
kvm_pmu_handle_pmcr(vcpu,
805805
__vcpu_sys_reg(vcpu, PMCR_EL0));
806806

807+
if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
808+
kvm_vcpu_pmu_restore_guest(vcpu);
809+
807810
if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
808811
return kvm_vcpu_suspend(vcpu);
809812

arch/arm64/kvm/pmu-emul.c

Lines changed: 26 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <asm/kvm_emulate.h>
1515
#include <kvm/arm_pmu.h>
1616
#include <kvm/arm_vgic.h>
17+
#include <asm/arm_pmuv3.h>
1718

1819
#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
1920

@@ -35,12 +36,8 @@ static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
3536
return &vcpu->arch.pmu.pmc[cnt_idx];
3637
}
3738

38-
static u32 kvm_pmu_event_mask(struct kvm *kvm)
39+
static u32 __kvm_pmu_event_mask(unsigned int pmuver)
3940
{
40-
unsigned int pmuver;
41-
42-
pmuver = kvm->arch.arm_pmu->pmuver;
43-
4441
switch (pmuver) {
4542
case ID_AA64DFR0_EL1_PMUVer_IMP:
4643
return GENMASK(9, 0);
@@ -55,6 +52,14 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
5552
}
5653
}
5754

55+
static u32 kvm_pmu_event_mask(struct kvm *kvm)
56+
{
57+
u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
58+
u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
59+
60+
return __kvm_pmu_event_mask(pmuver);
61+
}
62+
5863
/**
5964
* kvm_pmc_is_64bit - determine if counter is 64bit
6065
* @pmc: counter context
@@ -672,8 +677,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
672677
{
673678
struct arm_pmu_entry *entry;
674679

675-
if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
676-
pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
680+
/*
681+
* Check the sanitised PMU version for the system, as KVM does not
682+
* support implementations where PMUv3 exists on a subset of CPUs.
683+
*/
684+
if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
677685
return;
678686

679687
mutex_lock(&arm_pmus_lock);
@@ -750,11 +758,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
750758
} else {
751759
val = read_sysreg(pmceid1_el0);
752760
/*
753-
* Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
761+
* Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
754762
* as RAZ
755763
*/
756-
if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
757-
val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
764+
val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
765+
BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
766+
BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
758767
base = 32;
759768
}
760769

@@ -950,11 +959,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
950959
return 0;
951960
}
952961
case KVM_ARM_VCPU_PMU_V3_FILTER: {
962+
u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
953963
struct kvm_pmu_event_filter __user *uaddr;
954964
struct kvm_pmu_event_filter filter;
955965
int nr_events;
956966

957-
nr_events = kvm_pmu_event_mask(kvm) + 1;
967+
/*
968+
* Allow userspace to specify an event filter for the entire
969+
* event range supported by PMUVer of the hardware, rather
970+
* than the guest's PMUVer for KVM backward compatibility.
971+
*/
972+
nr_events = __kvm_pmu_event_mask(pmuver) + 1;
958973

959974
uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
960975

arch/arm64/kvm/pmu.c

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -236,3 +236,21 @@ bool kvm_set_pmuserenr(u64 val)
236236
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
237237
return true;
238238
}
239+
240+
/*
241+
* If we interrupted the guest to update the host PMU context, make
242+
* sure we re-apply the guest EL0 state.
243+
*/
244+
void kvm_vcpu_pmu_resync_el0(void)
245+
{
246+
struct kvm_vcpu *vcpu;
247+
248+
if (!has_vhe() || !in_interrupt())
249+
return;
250+
251+
vcpu = kvm_get_running_vcpu();
252+
if (!vcpu)
253+
return;
254+
255+
kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
256+
}

drivers/perf/arm_pmuv3.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -772,6 +772,8 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
772772

773773
/* Enable all counters */
774774
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
775+
776+
kvm_vcpu_pmu_resync_el0();
775777
}
776778

777779
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)

include/kvm/arm_pmu.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212

1313
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
1414

15-
#ifdef CONFIG_HW_PERF_EVENTS
15+
#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
1616

1717
struct kvm_pmc {
1818
u8 idx; /* index into the pmu->pmc array */
@@ -74,6 +74,7 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
7474
struct kvm_pmu_events *kvm_get_pmu_events(void);
7575
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
7676
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
77+
void kvm_vcpu_pmu_resync_el0(void);
7778

7879
#define kvm_vcpu_has_pmu(vcpu) \
7980
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
@@ -171,6 +172,7 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
171172
{
172173
return 0;
173174
}
175+
static inline void kvm_vcpu_pmu_resync_el0(void) {}
174176

175177
#endif
176178

0 commit comments

Comments
 (0)