Skip to content

Commit f19063b

Browse files
committed
KVM: x86/pmu: Snapshot event selectors that KVM emulates in software
Snapshot the event selectors for the events that KVM emulates in software, which is currently instructions retired and branch instructions retired. The event selectors are tied to the underlying CPU, i.e. are constant for a given platform even though perf doesn't manage the mappings as such. Getting the event selectors from perf isn't exactly cheap, especially if mitigations are enabled, as at least one indirect call is involved. Snapshot the values in KVM instead of optimizing perf as working with the raw event selectors will be required if KVM ever wants to emulate events that aren't part of perf's uABI, i.e. that don't have an "enum perf_hw_id" entry. Link: https://lore.kernel.org/r/20231110022857.1273836-8-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent d2b321e commit f19063b

File tree

4 files changed

+24
-14
lines changed

4 files changed

+24
-14
lines changed

arch/x86/kvm/pmu.c

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,9 @@
2929
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
3030
EXPORT_SYMBOL_GPL(kvm_pmu_cap);
3131

32+
struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
33+
EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
34+
3235
/* Precise Distribution of Instructions Retired (PDIR) */
3336
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
3437
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
@@ -819,13 +822,6 @@ static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
819822
kvm_pmu_request_counter_reprogram(pmc);
820823
}
821824

822-
static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
823-
unsigned int perf_hw_id)
824-
{
825-
return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
826-
AMD64_RAW_EVENT_MASK_NB);
827-
}
828-
829825
static inline bool cpl_is_matched(struct kvm_pmc *pmc)
830826
{
831827
bool select_os, select_user;
@@ -845,7 +841,7 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
845841
return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
846842
}
847843

848-
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
844+
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
849845
{
850846
DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
851847
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -865,7 +861,10 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
865861
continue;
866862

867863
/* Ignore checks for edge detect, pin control, invert and CMASK bits */
868-
if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
864+
if ((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB)
865+
continue;
866+
867+
if (cpl_is_matched(pmc))
869868
kvm_pmu_incr_counter(pmc);
870869
}
871870
}

arch/x86/kvm/pmu.h

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,11 @@
2222

2323
#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED
2424

25+
struct kvm_pmu_emulated_event_selectors {
26+
u64 INSTRUCTIONS_RETIRED;
27+
u64 BRANCH_INSTRUCTIONS_RETIRED;
28+
};
29+
2530
struct kvm_pmu_ops {
2631
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
2732
unsigned int idx, u64 *mask);
@@ -171,6 +176,7 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
171176
}
172177

173178
extern struct x86_pmu_capability kvm_pmu_cap;
179+
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;
174180

175181
static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
176182
{
@@ -212,6 +218,11 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
212218
pmu_ops->MAX_NR_GP_COUNTERS);
213219
kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
214220
KVM_PMC_MAX_FIXED);
221+
222+
kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
223+
perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
224+
kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
225+
perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
215226
}
216227

217228
static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
@@ -259,7 +270,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu);
259270
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
260271
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
261272
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
262-
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
273+
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
263274

264275
bool is_vmware_backdoor_pmc(u32 pmc_idx);
265276

arch/x86/kvm/vmx/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3606,7 +3606,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
36063606
return 1;
36073607
}
36083608

3609-
kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
3609+
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
36103610

36113611
if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
36123612
return nested_vmx_failInvalid(vcpu);

arch/x86/kvm/x86.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8903,7 +8903,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
89038903
if (unlikely(!r))
89048904
return 0;
89058905

8906-
kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
8906+
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
89078907

89088908
/*
89098909
* rflags is the old, "raw" value of the flags. The new value has
@@ -9216,9 +9216,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
92169216
*/
92179217
if (!ctxt->have_exception ||
92189218
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
9219-
kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
9219+
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
92209220
if (ctxt->is_branch)
9221-
kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
9221+
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
92229222
kvm_rip_write(vcpu, ctxt->eip);
92239223
if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
92249224
r = kvm_vcpu_do_singlestep(vcpu);

0 commit comments

Comments
 (0)