Skip to content

Commit ae323e0

Browse files
committed
KVM: arm64: nv: Reprogram PMU events affected by nested transition
Start reprogramming PMU events at nested boundaries now that everything is in place to handle the EL2 event filter. Only repaint events where the filter differs between EL1 and EL2 as a slight optimization. PMU now 'works' for nested VMs, albeit slowly.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241025182559.3364829-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 8a34979 commit ae323e0

File tree

3 files changed

+36
-0
lines changed

3 files changed

+36
-0
lines changed

arch/arm64/kvm/emulate-nested.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2450,6 +2450,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
24502450

24512451
kvm_arch_vcpu_load(vcpu, smp_processor_id());
24522452
preempt_enable();
2453+
2454+
kvm_pmu_nested_transition(vcpu);
24532455
}
24542456

24552457
static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2532,6 +2534,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
25322534
kvm_arch_vcpu_load(vcpu, smp_processor_id());
25332535
preempt_enable();
25342536

2537+
kvm_pmu_nested_transition(vcpu);
2538+
25352539
return 1;
25362540
}
25372541

arch/arm64/kvm/pmu-emul.c

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1215,3 +1215,32 @@ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
12151215

12161216
return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
12171217
}
1218+
1219+
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
1220+
{
1221+
bool reprogrammed = false;
1222+
unsigned long mask;
1223+
int i;
1224+
1225+
if (!kvm_vcpu_has_pmu(vcpu))
1226+
return;
1227+
1228+
mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
1229+
for_each_set_bit(i, &mask, 32) {
1230+
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
1231+
1232+
/*
1233+
* We only need to reconfigure events where the filter is
1234+
* different at EL1 vs. EL2, as we're multiplexing the true EL1
1235+
* event filter bit for nested.
1236+
*/
1237+
if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
1238+
continue;
1239+
1240+
kvm_pmu_create_perf_event(pmc);
1241+
reprogrammed = true;
1242+
}
1243+
1244+
if (reprogrammed)
1245+
kvm_vcpu_pmu_restore_guest(vcpu);
1246+
}

include/kvm/arm_pmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
9898

9999
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
100100
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
101+
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
101102
#else
102103
struct kvm_pmu {
103104
};
@@ -198,6 +199,8 @@ static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int id
198199
return false;
199200
}
200201

202+
/* No-op stub for builds without PMU emulation support (the #else branch of
 * this header — presumably !CONFIG_HW_PERF_EVENTS; confirm the config gate). */
static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
203+
201204
#endif
202205

203206
#endif

0 commit comments

Comments
 (0)