Skip to content

Commit a3034da

Browse files
committed
KVM: arm64: Rename kvm_pmu_valid_counter_mask()
Nested PMU support requires dynamically changing the visible range of PMU counters based on the exception level and value of MDCR_EL2.HPMN. At the same time, the PMU emulation code needs to know the absolute number of implemented counters, regardless of context.

Rename the existing helper to make it obvious that it returns the number of implemented counters and not anything else.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241025182354.3364124-13-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 166b77a commit a3034da

File tree

3 files changed

+12
-12
lines changed

3 files changed

+12
-12
lines changed

arch/arm64/kvm/pmu-emul.c

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -244,7 +244,7 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
244244
*/
245245
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
246246
{
247-
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
247+
unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
248248
int i;
249249

250250
for_each_set_bit(i, &mask, 32)
@@ -283,7 +283,7 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
283283
return idx >= hpmn;
284284
}
285285

286-
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
286+
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
287287
{
288288
u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
289289

@@ -592,7 +592,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
592592
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
593593

594594
if (val & ARMV8_PMU_PMCR_P) {
595-
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
595+
unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
596596
mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
597597
for_each_set_bit(i, &mask, 32)
598598
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
@@ -822,7 +822,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
822822

823823
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
824824
{
825-
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
825+
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
826826

827827
kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
828828

arch/arm64/kvm/sys_regs.c

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -1168,7 +1168,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
11681168
{
11691169
bool set;
11701170

1171-
val &= kvm_pmu_valid_counter_mask(vcpu);
1171+
val &= kvm_pmu_implemented_counter_mask(vcpu);
11721172

11731173
switch (r->reg) {
11741174
case PMOVSSET_EL0:
@@ -1191,7 +1191,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
11911191

11921192
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
11931193
{
1194-
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
1194+
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
11951195

11961196
*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
11971197
return 0;
@@ -1205,7 +1205,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
12051205
if (pmu_access_el0_disabled(vcpu))
12061206
return false;
12071207

1208-
mask = kvm_pmu_valid_counter_mask(vcpu);
1208+
mask = kvm_pmu_implemented_counter_mask(vcpu);
12091209
if (p->is_write) {
12101210
val = p->regval & mask;
12111211
if (r->Op2 & 0x1) {
@@ -1228,7 +1228,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
12281228
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
12291229
const struct sys_reg_desc *r)
12301230
{
1231-
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
1231+
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
12321232

12331233
if (check_pmu_access_disabled(vcpu, 0))
12341234
return false;
@@ -1252,7 +1252,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
12521252
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
12531253
const struct sys_reg_desc *r)
12541254
{
1255-
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
1255+
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
12561256

12571257
if (pmu_access_el0_disabled(vcpu))
12581258
return false;
@@ -1282,7 +1282,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
12821282
if (pmu_write_swinc_el0_disabled(vcpu))
12831283
return false;
12841284

1285-
mask = kvm_pmu_valid_counter_mask(vcpu);
1285+
mask = kvm_pmu_implemented_counter_mask(vcpu);
12861286
kvm_pmu_software_increment(vcpu, p->regval & mask);
12871287
return true;
12881288
}

include/kvm/arm_pmu.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -47,7 +47,7 @@ static __always_inline bool kvm_arm_support_pmu_v3(void)
4747
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
4848
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
4949
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
50-
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
50+
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
5151
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
5252
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
5353
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
@@ -114,7 +114,7 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
114114
}
115115
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
116116
u64 select_idx, u64 val) {}
117-
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
117+
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
118118
{
119119
return 0;
120120
}

0 commit comments

Comments (0)