Commit 369c012
Merge branch 'kvm-arm64/pmu-fixes' into kvmarm/next

* kvm-arm64/pmu-fixes:
  : vPMU fixes for 6.15 courtesy of Akihiko Odaki
  :
  : Various fixes to KVM's vPMU implementation, notably ensuring
  : userspace-directed changes to the PMCs are reflected in the backing
  : perf events.
  KVM: arm64: PMU: Reload when resetting
  KVM: arm64: PMU: Reload when user modifies registers
  KVM: arm64: PMU: Fix SET_ONE_REG for vPMC regs
  KVM: arm64: PMU: Assume PMU presence in pmu-emul.c
  KVM: arm64: PMU: Set raw values from user to PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR}

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>

2 parents ca19dd4 + fe53538 · commit 369c012
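For context, here is a minimal userspace sketch (not part of this commit) of the path these fixes target: a VMM writing a vPMC with KVM_SET_ONE_REG. The ioctl, struct kvm_one_reg, and the ARM64_SYS_REG() encoding macro come from the KVM UAPI headers; the helper name set_pmccntr and the vcpu_fd argument are hypothetical, and the fd is assumed to be a vCPU created with the PMUv3 feature enabled.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: push a cycle counter value into the guest's
 * PMCCNTR_EL0 (op0=3, op1=3, CRn=9, CRm=13, op2=0) from userspace.
 */
static int set_pmccntr(int vcpu_fd, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = ARM64_SYS_REG(3, 3, 9, 13, 0),	/* PMCCNTR_EL0 */
		.addr = (uint64_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Before this series, such a write could land in the vCPU's shadow register without being propagated to the backing perf event; the diffs below make userspace writes release and reload the perf events.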

File tree: 6 files changed, 63 insertions(+), 76 deletions(-)

arch/arm64/kvm/arm.c

Lines changed: 11 additions & 6 deletions
@@ -858,9 +858,11 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-	ret = kvm_arm_pmu_v3_enable(vcpu);
-	if (ret)
-		return ret;
+	if (kvm_vcpu_has_pmu(vcpu)) {
+		ret = kvm_arm_pmu_v3_enable(vcpu);
+		if (ret)
+			return ret;
+	}
 
 	if (is_protected_kvm_enabled()) {
 		ret = pkvm_create_hyp_vm(kvm);
@@ -1175,7 +1177,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	 */
 	preempt_disable();
 
-	kvm_pmu_flush_hwstate(vcpu);
+	if (kvm_vcpu_has_pmu(vcpu))
+		kvm_pmu_flush_hwstate(vcpu);
 
 	local_irq_disable();
 
@@ -1194,7 +1197,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			isb(); /* Ensure work in x_flush_hwstate is committed */
-			kvm_pmu_sync_hwstate(vcpu);
+			if (kvm_vcpu_has_pmu(vcpu))
+				kvm_pmu_sync_hwstate(vcpu);
 			if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
 				kvm_timer_sync_user(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
@@ -1224,7 +1228,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 * that the vgic can properly sample the updated state of the
 		 * interrupt line.
 		 */
-		kvm_pmu_sync_hwstate(vcpu);
+		if (kvm_vcpu_has_pmu(vcpu))
+			kvm_pmu_sync_hwstate(vcpu);
 
 		/*
 		 * Sync the vgic state before syncing the timer state because

arch/arm64/kvm/emulate-nested.c

Lines changed: 4 additions & 2 deletions
@@ -2518,7 +2518,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 	vcpu_clear_flag(vcpu, IN_NESTED_ERET);
 	preempt_enable();
 
-	kvm_pmu_nested_transition(vcpu);
+	if (kvm_vcpu_has_pmu(vcpu))
+		kvm_pmu_nested_transition(vcpu);
 }
 
 static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2601,7 +2602,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
 	kvm_arch_vcpu_load(vcpu, smp_processor_id());
 	preempt_enable();
 
-	kvm_pmu_nested_transition(vcpu);
+	if (kvm_vcpu_has_pmu(vcpu))
+		kvm_pmu_nested_transition(vcpu);
 
 	return 1;
 }

arch/arm64/kvm/pmu-emul.c

Lines changed: 14 additions & 42 deletions
@@ -154,9 +154,6 @@ static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
  */
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 {
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return 0;
-
 	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
 }
 
@@ -195,12 +192,22 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
  */
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
 }
 
+/**
+ * kvm_pmu_set_counter_value_user - set PMU counter value from user
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+	kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
+	__vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
+	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+}
+
 /**
  * kvm_pmu_release_perf_event - remove the perf event
  * @pmc: The PMU counter pointer
@@ -251,20 +258,6 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
 		pmu->pmc[i].idx = i;
 }
 
-/**
- * kvm_pmu_vcpu_reset - reset pmu state for cpu
- * @vcpu: The vcpu pointer
- *
- */
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
-{
-	unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
-	int i;
-
-	for_each_set_bit(i, &mask, 32)
-		kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
-}
-
 /**
  * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
  * @vcpu: The vcpu pointer
@@ -354,7 +347,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
-	if (!kvm_vcpu_has_pmu(vcpu) || !val)
+	if (!val)
 		return;
 
 	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -405,9 +398,6 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	bool overflow;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	overflow = kvm_pmu_overflow_status(vcpu);
 	if (pmu->irq_level == overflow)
 		return;
@@ -603,9 +593,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
 	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
@@ -793,9 +780,6 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
 	u64 reg;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	reg = counter_index_to_evtreg(pmc->idx);
 	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
 
@@ -901,9 +885,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	u64 val, mask = 0;
 	int base, i, nr_events;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return 0;
-
 	if (!pmceid1) {
 		val = compute_pmceid0(cpu_pmu);
 		base = 0;
@@ -944,9 +925,6 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return 0;
-
 	if (!vcpu->arch.pmu.created)
 		return -EINVAL;
 
@@ -969,9 +947,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	/* One-off reload of the PMU on first run */
-	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
-
 	return 0;
 }
 
@@ -1295,9 +1270,6 @@ void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
 	unsigned long mask;
 	int i;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 	for_each_set_bit(i, &mask, 32) {
 		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
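The new kvm_pmu_set_counter_value_user() above defers the perf-event update by raising KVM_REQ_RELOAD_PMU rather than reprogramming immediately. A rough sketch of how the request is consumed, paraphrased from the vCPU-entry request handling in arch/arm64/kvm/arm.c (not part of this diff):

	/*
	 * On the next guest entry, pending vCPU requests are drained; a
	 * reload request folds the raw shadow registers written by
	 * userspace back into the backing perf events.
	 */
	if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
		kvm_vcpu_reload_pmu(vcpu);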

arch/arm64/kvm/reset.c

Lines changed: 0 additions & 3 deletions
@@ -196,9 +196,6 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	vcpu->arch.reset_state.reset = false;
 	spin_unlock(&vcpu->arch.mp_state_lock);
 
-	/* Reset PMU outside of the non-preemptible section */
-	kvm_pmu_vcpu_reset(vcpu);
-
 	preempt_disable();
 	loaded = (vcpu->cpu != -1);
 	if (loaded)

arch/arm64/kvm/sys_regs.c

Lines changed: 31 additions & 21 deletions
@@ -967,6 +967,22 @@ static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	return 0;
 }
 
+static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			  u64 val)
+{
+	u64 idx;
+
+	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
+		/* PMCCNTR_EL0 */
+		idx = ARMV8_PMU_CYCLE_IDX;
+	else
+		/* PMEVCNTRn_EL0 */
+		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+
+	kvm_pmu_set_counter_value_user(vcpu, idx, val);
+	return 0;
+}
+
 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 			      struct sys_reg_params *p,
 			      const struct sys_reg_desc *r)
@@ -1058,25 +1074,10 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
 {
-	bool set;
-
-	val &= kvm_pmu_accessible_counter_mask(vcpu);
-
-	switch (r->reg) {
-	case PMOVSSET_EL0:
-		/* CRm[1] being set indicates a SET register, and CLR otherwise */
-		set = r->CRm & 2;
-		break;
-	default:
-		/* Op2[0] being set indicates a SET register, and CLR otherwise */
-		set = r->Op2 & 1;
-		break;
-	}
+	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
 
-	if (set)
-		__vcpu_sys_reg(vcpu, r->reg) |= val;
-	else
-		__vcpu_sys_reg(vcpu, r->reg) &= ~val;
+	__vcpu_sys_reg(vcpu, r->reg) = val & mask;
+	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
 	return 0;
 }
@@ -1236,6 +1237,8 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 		val |= ARMV8_PMU_PMCR_LC;
 
 	__vcpu_sys_reg(vcpu, r->reg) = val;
+	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
 	return 0;
 }
 
@@ -1262,6 +1265,7 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 #define PMU_PMEVCNTR_EL0(n)						\
 	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
 	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
+	  .set_user = set_pmu_evcntr,					\
 	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
@@ -1880,12 +1884,14 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
 				      const struct sys_reg_desc *rd)
 {
-	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+	u8 perfmon;
 	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
 
 	val &= ~ID_DFR0_EL1_PerfMon_MASK;
-	if (kvm_vcpu_has_pmu(vcpu))
+	if (kvm_vcpu_has_pmu(vcpu)) {
+		perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
 		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
+	}
 
 	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
 
@@ -3052,7 +3058,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  .access = access_pmceid, .reset = NULL },
 	{ PMU_SYS_REG(PMCCNTR_EL0),
 	  .access = access_pmu_evcntr, .reset = reset_unknown,
-	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
+	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
+	  .set_user = set_pmu_evcntr },
 	{ PMU_SYS_REG(PMXEVTYPER_EL0),
 	  .access = access_pmu_evtyper, .reset = NULL },
 	{ PMU_SYS_REG(PMXEVCNTR_EL0),
@@ -4712,6 +4719,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 	}
 
 	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+
+	if (kvm_vcpu_has_pmu(vcpu))
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 }
 
 /**
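The index decode in set_pmu_evcntr() above follows the architectural encoding of PMEVCNTR<n>_EL0, in which CRm carries n[4:3] (under a 0b10 prefix) and Op2 carries n[2:0]. A standalone sanity check of that arithmetic for PMEVCNTR10_EL0, as an illustrative sketch:

#include <assert.h>

int main(void)
{
	/* PMEVCNTR10_EL0 encodes CRm = 0b1001 (9) and Op2 = 0b010 (2). */
	unsigned int crm = 9, op2 = 2;

	/* ((CRm & 3) << 3) | (Op2 & 7) recovers the counter index n = 10. */
	assert((((crm & 3) << 3) | (op2 & 7)) == 10);
	return 0;
}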

include/kvm/arm_pmu.h

Lines changed: 3 additions & 2 deletions
@@ -41,11 +41,11 @@ bool kvm_supports_guest_pmuv3(void);
 #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
 void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
@@ -109,6 +109,8 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 }
 static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
 					     u64 select_idx, u64 val) {}
+static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
+						  u64 select_idx, u64 val) {}
 static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 {
 	return 0;
@@ -118,7 +120,6 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 	return 0;
 }
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
