Commit 9a1c58c

KVM: arm64: nv: Adjust range of accessible PMCs according to HPMN
The value of MDCR_EL2.HPMN controls the number of event counters made
visible to EL0 and EL1. This means it is possible for the guest
hypervisor to allow direct access to event counters to the L2.

Rework KVM's PMU register emulation to take the effects of HPMN into
account when handling a trap. For bitmask-style registers, writes only
affect accessible registers.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241025182354.3364124-14-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent a3034da commit 9a1c58c
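For orientation, here is a minimal, stand-alone sketch of the masking the patch introduces: counters at index HPMN and above are stripped from the mask a nested guest may touch. The helper name accessible_counter_mask, the locally defined GENMASK macro, and the example values (8 implemented counters, HPMN = 6) are illustrative assumptions, not the kernel code itself.

/*
 * Hypothetical illustration of kvm_pmu_accessible_counter_mask():
 * event counters [HPMN, PMCR_EL0.N) are reserved for the guest
 * hypervisor, so they are removed from the mask the L2 may access.
 * GENMASK() is re-defined locally; the real code uses <linux/bits.h>.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint64_t accessible_counter_mask(unsigned int nr_counters,
					unsigned int hpmn)
{
	/* All implemented event counters: bits [0, nr_counters) */
	uint64_t mask = GENMASK(nr_counters - 1, 0);

	/* Counters at or above HPMN are not visible below (v)EL2 */
	if (hpmn < nr_counters)
		mask &= ~GENMASK(nr_counters - 1, hpmn);

	return mask;
}

int main(void)
{
	/* 8 implemented counters, the guest hypervisor keeps 2 for itself */
	printf("accessible mask: 0x%llx\n",
	       (unsigned long long)accessible_counter_mask(8, 6));
	return 0;
}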

3 files changed (+24, -7 lines)

arch/arm64/kvm/pmu-emul.c

Lines changed: 13 additions & 1 deletion
@@ -283,6 +283,18 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
 	return idx >= hpmn;
 }
 
+u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
+{
+	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
+	u64 hpmn;
+
+	if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
+		return mask;
+
+	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
+	return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
+}
+
 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 {
 	u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
@@ -592,7 +604,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
-		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
+		unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
 		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
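As a hedged walk-through of the PMCR_EL0.P path above: assuming 8 implemented event counters and MDCR_EL2.HPMN == 6 programmed by the guest hypervisor, the accessible mask covers counters 0-5 plus the cycle counter (index 31), and the cycle counter is stripped before the reset loop since the P bit must not affect it. The values below are hypothetical examples, not taken from the kernel.

/*
 * Illustrative reset loop mirroring the PMCR_EL0.P handling in the
 * hunk above, with assumed values: counters 0-5 and the cycle counter
 * are accessible, and only the event counters get zeroed.
 */
#include <stdio.h>

#define CYCLE_IDX 31

int main(void)
{
	/* assumed accessible set: counters 0-5 plus the cycle counter */
	unsigned long accessible = 0x3fUL | (1UL << CYCLE_IDX);
	unsigned long mask = accessible & ~(1UL << CYCLE_IDX);

	for (unsigned int i = 0; i < 32; i++)
		if (mask & (1UL << i))
			printf("PMCR_EL0.P resets event counter %u\n", i);

	return 0;
}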

arch/arm64/kvm/sys_regs.c

Lines changed: 6 additions & 6 deletions
@@ -1168,7 +1168,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
 {
 	bool set;
 
-	val &= kvm_pmu_implemented_counter_mask(vcpu);
+	val &= kvm_pmu_accessible_counter_mask(vcpu);
 
 	switch (r->reg) {
 	case PMOVSSET_EL0:
@@ -1191,7 +1191,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
 
 static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
 {
-	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
+	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
 
 	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
 	return 0;
@@ -1205,7 +1205,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
 
-	mask = kvm_pmu_implemented_counter_mask(vcpu);
+	mask = kvm_pmu_accessible_counter_mask(vcpu);
 	if (p->is_write) {
 		val = p->regval & mask;
 		if (r->Op2 & 0x1) {
@@ -1228,7 +1228,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
-	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
+	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
 
 	if (check_pmu_access_disabled(vcpu, 0))
 		return false;
@@ -1252,7 +1252,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
-	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
+	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
 
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
@@ -1282,7 +1282,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (pmu_write_swinc_el0_disabled(vcpu))
 		return false;
 
-	mask = kvm_pmu_implemented_counter_mask(vcpu);
+	mask = kvm_pmu_accessible_counter_mask(vcpu);
 	kvm_pmu_software_increment(vcpu, p->regval & mask);
 	return true;
 }

include/kvm/arm_pmu.h

Lines changed: 5 additions & 0 deletions
@@ -48,6 +48,7 @@ static __always_inline bool kvm_arm_support_pmu_v3(void)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
@@ -118,6 +119,10 @@ static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
+static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
