Skip to content

Commit e22c369

Browse files
committed
KVM: arm64: Add unified helper for reprogramming counters by mask
Having separate helpers for enabling/disabling counters provides the wrong abstraction, as the state of each counter needs to be evaluated independently and, in some cases, use a different global enable bit. Collapse the enable/disable accessors into a single, common helper that reconfigures every counter set in @mask, leaving the complexity of determining if an event is actually enabled in kvm_pmu_counter_is_enabled(). Reviewed-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20241217175513.3658056-1-oliver.upton@linux.dev Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 985bb51 commit e22c369

File tree

3 files changed

+29
-53
lines changed

3 files changed

+29
-53
lines changed

arch/arm64/kvm/pmu-emul.c

Lines changed: 23 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
2424

2525
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
2626
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
27+
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
2728

2829
static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
2930
{
@@ -327,65 +328,44 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
327328
return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
328329
}
329330

330-
/**
331-
* kvm_pmu_enable_counter_mask - enable selected PMU counters
332-
* @vcpu: The vcpu pointer
333-
* @val: the value guest writes to PMCNTENSET register
334-
*
335-
* Call perf_event_enable to start counting the perf event
336-
*/
337-
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
331+
static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
338332
{
339-
int i;
340-
if (!kvm_vcpu_has_pmu(vcpu))
341-
return;
342-
343-
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
333+
if (!pmc->perf_event) {
334+
kvm_pmu_create_perf_event(pmc);
344335
return;
336+
}
345337

346-
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
347-
struct kvm_pmc *pmc;
348-
349-
if (!(val & BIT(i)))
350-
continue;
351-
352-
pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
338+
perf_event_enable(pmc->perf_event);
339+
if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
340+
kvm_debug("fail to enable perf event\n");
341+
}
353342

354-
if (!pmc->perf_event) {
355-
kvm_pmu_create_perf_event(pmc);
356-
} else {
357-
perf_event_enable(pmc->perf_event);
358-
if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
359-
kvm_debug("fail to enable perf event\n");
360-
}
361-
}
343+
static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
344+
{
345+
if (pmc->perf_event)
346+
perf_event_disable(pmc->perf_event);
362347
}
363348

364-
/**
365-
* kvm_pmu_disable_counter_mask - disable selected PMU counters
366-
* @vcpu: The vcpu pointer
367-
* @val: the value guest writes to PMCNTENCLR register
368-
*
369-
* Call perf_event_disable to stop counting the perf event
370-
*/
371-
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
349+
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
372350
{
373351
int i;
374352

375353
if (!kvm_vcpu_has_pmu(vcpu) || !val)
376354
return;
377355

378356
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
379-
struct kvm_pmc *pmc;
357+
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
380358

381359
if (!(val & BIT(i)))
382360
continue;
383361

384-
pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
385-
386-
if (pmc->perf_event)
387-
perf_event_disable(pmc->perf_event);
362+
if (kvm_pmu_counter_is_enabled(pmc))
363+
kvm_pmc_enable_perf_event(pmc);
364+
else
365+
kvm_pmc_disable_perf_event(pmc);
388366
}
367+
368+
kvm_vcpu_pmu_restore_guest(vcpu);
389369
}
390370

391371
/*
@@ -630,10 +610,10 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
630610
__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
631611

632612
if (val & ARMV8_PMU_PMCR_E) {
633-
kvm_pmu_enable_counter_mask(vcpu,
613+
kvm_pmu_reprogram_counter_mask(vcpu,
634614
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
635615
} else {
636-
kvm_pmu_disable_counter_mask(vcpu,
616+
kvm_pmu_reprogram_counter_mask(vcpu,
637617
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
638618
}
639619

arch/arm64/kvm/sys_regs.c

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1208,16 +1208,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
12081208
mask = kvm_pmu_accessible_counter_mask(vcpu);
12091209
if (p->is_write) {
12101210
val = p->regval & mask;
1211-
if (r->Op2 & 0x1) {
1211+
if (r->Op2 & 0x1)
12121212
/* accessing PMCNTENSET_EL0 */
12131213
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
1214-
kvm_pmu_enable_counter_mask(vcpu, val);
1215-
kvm_vcpu_pmu_restore_guest(vcpu);
1216-
} else {
1214+
else
12171215
/* accessing PMCNTENCLR_EL0 */
12181216
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
1219-
kvm_pmu_disable_counter_mask(vcpu, val);
1220-
}
1217+
1218+
kvm_pmu_reprogram_counter_mask(vcpu, val);
12211219
} else {
12221220
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
12231221
}

include/kvm/arm_pmu.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -53,8 +53,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
5353
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
5454
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
5555
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
56-
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
57-
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
56+
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
5857
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
5958
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
6059
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -127,8 +126,7 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
127126
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
128127
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
129128
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
130-
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
131-
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
129+
static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
132130
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
133131
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
134132
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)

0 commit comments

Comments
 (0)