Skip to content

Commit be5ccac

Browse files
akihikodaki authored and oupton committed
KVM: arm64: PMU: Assume PMU presence in pmu-emul.c
Many functions in pmu-emul.c check kvm_vcpu_has_pmu(vcpu). A favorable interpretation is defensive programming, but it also has downsides:

- It is confusing, as it implies these functions are called without a PMU although most of them are called only when a PMU is present.
- It makes the semantics of the functions fuzzy. For example, calling kvm_pmu_disable_counter_mask() without a PMU may result in a no-op as there are no enabled counters, but it's unclear what kvm_pmu_get_counter_value() returns when there is no PMU.
- It allows callers to skip checking kvm_vcpu_has_pmu(vcpu), but it is often wrong to call these functions without a PMU.
- It is error-prone to duplicate kvm_vcpu_has_pmu(vcpu) checks across multiple functions. Many functions are called for system registers, and the system register infrastructure already employs less error-prone, comprehensive checks.

Check kvm_vcpu_has_pmu(vcpu) in the callers of these functions instead, and remove the obsolete checks from pmu-emul.c. The only exceptions are the functions that implement ioctls, as they have definitive semantics even when the PMU is not present.

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250315-pmc-v5-2-ecee87dab216@daynix.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent f2aeb7b commit be5ccac

File tree

4 files changed

+20
-35
lines changed

4 files changed

+20
-35
lines changed

arch/arm64/kvm/arm.c

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -835,9 +835,11 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
835835
if (ret)
836836
return ret;
837837

838-
ret = kvm_arm_pmu_v3_enable(vcpu);
839-
if (ret)
840-
return ret;
838+
if (kvm_vcpu_has_pmu(vcpu)) {
839+
ret = kvm_arm_pmu_v3_enable(vcpu);
840+
if (ret)
841+
return ret;
842+
}
841843

842844
if (is_protected_kvm_enabled()) {
843845
ret = pkvm_create_hyp_vm(kvm);
@@ -1148,7 +1150,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
11481150
*/
11491151
preempt_disable();
11501152

1151-
kvm_pmu_flush_hwstate(vcpu);
1153+
if (kvm_vcpu_has_pmu(vcpu))
1154+
kvm_pmu_flush_hwstate(vcpu);
11521155

11531156
local_irq_disable();
11541157

@@ -1167,7 +1170,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
11671170
if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
11681171
vcpu->mode = OUTSIDE_GUEST_MODE;
11691172
isb(); /* Ensure work in x_flush_hwstate is committed */
1170-
kvm_pmu_sync_hwstate(vcpu);
1173+
if (kvm_vcpu_has_pmu(vcpu))
1174+
kvm_pmu_sync_hwstate(vcpu);
11711175
if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
11721176
kvm_timer_sync_user(vcpu);
11731177
kvm_vgic_sync_hwstate(vcpu);
@@ -1197,7 +1201,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
11971201
* that the vgic can properly sample the updated state of the
11981202
* interrupt line.
11991203
*/
1200-
kvm_pmu_sync_hwstate(vcpu);
1204+
if (kvm_vcpu_has_pmu(vcpu))
1205+
kvm_pmu_sync_hwstate(vcpu);
12011206

12021207
/*
12031208
* Sync the vgic state before syncing the timer state because

arch/arm64/kvm/emulate-nested.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2516,7 +2516,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
25162516
kvm_arch_vcpu_load(vcpu, smp_processor_id());
25172517
preempt_enable();
25182518

2519-
kvm_pmu_nested_transition(vcpu);
2519+
if (kvm_vcpu_has_pmu(vcpu))
2520+
kvm_pmu_nested_transition(vcpu);
25202521
}
25212522

25222523
static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2599,7 +2600,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
25992600
kvm_arch_vcpu_load(vcpu, smp_processor_id());
26002601
preempt_enable();
26012602

2602-
kvm_pmu_nested_transition(vcpu);
2603+
if (kvm_vcpu_has_pmu(vcpu))
2604+
kvm_pmu_nested_transition(vcpu);
26032605

26042606
return 1;
26052607
}

arch/arm64/kvm/pmu-emul.c

Lines changed: 1 addition & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -150,9 +150,6 @@ static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
150150
*/
151151
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
152152
{
153-
if (!kvm_vcpu_has_pmu(vcpu))
154-
return 0;
155-
156153
return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
157154
}
158155

@@ -191,9 +188,6 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
191188
*/
192189
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
193190
{
194-
if (!kvm_vcpu_has_pmu(vcpu))
195-
return;
196-
197191
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
198192
}
199193

@@ -350,7 +344,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
350344
{
351345
int i;
352346

353-
if (!kvm_vcpu_has_pmu(vcpu) || !val)
347+
if (!val)
354348
return;
355349

356350
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -401,9 +395,6 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
401395
struct kvm_pmu *pmu = &vcpu->arch.pmu;
402396
bool overflow;
403397

404-
if (!kvm_vcpu_has_pmu(vcpu))
405-
return;
406-
407398
overflow = kvm_pmu_overflow_status(vcpu);
408399
if (pmu->irq_level == overflow)
409400
return;
@@ -599,9 +590,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
599590
{
600591
int i;
601592

602-
if (!kvm_vcpu_has_pmu(vcpu))
603-
return;
604-
605593
/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
606594
if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
607595
val &= ~ARMV8_PMU_PMCR_LP;
@@ -766,9 +754,6 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
766754
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
767755
u64 reg;
768756

769-
if (!kvm_vcpu_has_pmu(vcpu))
770-
return;
771-
772757
reg = counter_index_to_evtreg(pmc->idx);
773758
__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
774759

@@ -848,9 +833,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
848833
u64 val, mask = 0;
849834
int base, i, nr_events;
850835

851-
if (!kvm_vcpu_has_pmu(vcpu))
852-
return 0;
853-
854836
if (!pmceid1) {
855837
val = read_sysreg(pmceid0_el0);
856838
/* always support CHAIN */
@@ -900,9 +882,6 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
900882

901883
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
902884
{
903-
if (!kvm_vcpu_has_pmu(vcpu))
904-
return 0;
905-
906885
if (!vcpu->arch.pmu.created)
907886
return -EINVAL;
908887

@@ -1231,9 +1210,6 @@ void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
12311210
unsigned long mask;
12321211
int i;
12331212

1234-
if (!kvm_vcpu_has_pmu(vcpu))
1235-
return;
1236-
12371213
mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
12381214
for_each_set_bit(i, &mask, 32) {
12391215
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

arch/arm64/kvm/sys_regs.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1853,12 +1853,14 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
18531853
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
18541854
const struct sys_reg_desc *rd)
18551855
{
1856-
u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
1856+
u8 perfmon;
18571857
u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
18581858

18591859
val &= ~ID_DFR0_EL1_PerfMon_MASK;
1860-
if (kvm_vcpu_has_pmu(vcpu))
1860+
if (kvm_vcpu_has_pmu(vcpu)) {
1861+
perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
18611862
val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
1863+
}
18621864

18631865
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
18641866

0 commit comments

Comments (0)