@@ -22,23 +22,51 @@
 
 #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
 
+enum intel_pmu_architectural_events {
+	/*
+	 * The order of the architectural events matters as support for each
+	 * event is enumerated via CPUID using the index of the event.
+	 */
+	INTEL_ARCH_CPU_CYCLES,
+	INTEL_ARCH_INSTRUCTIONS_RETIRED,
+	INTEL_ARCH_REFERENCE_CYCLES,
+	INTEL_ARCH_LLC_REFERENCES,
+	INTEL_ARCH_LLC_MISSES,
+	INTEL_ARCH_BRANCHES_RETIRED,
+	INTEL_ARCH_BRANCHES_MISPREDICTED,
+
+	NR_REAL_INTEL_ARCH_EVENTS,
+
+	/*
+	 * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
+	 * TSC reference cycles. The architectural reference cycles event may
+	 * or may not actually use the TSC as the reference, e.g. might use the
+	 * core crystal clock or the bus clock (yeah, "architectural").
+	 */
+	PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
+	NR_INTEL_ARCH_EVENTS,
+};
+
 static struct {
 	u8 eventsel;
 	u8 unit_mask;
 } const intel_arch_events[] = {
-	[0] = { 0x3c, 0x00 },
-	[1] = { 0xc0, 0x00 },
-	[2] = { 0x3c, 0x01 },
-	[3] = { 0x2e, 0x4f },
-	[4] = { 0x2e, 0x41 },
-	[5] = { 0xc4, 0x00 },
-	[6] = { 0xc5, 0x00 },
-	/* The above index must match CPUID 0x0A.EBX bit vector */
-	[7] = { 0x00, 0x03 },
+	[INTEL_ARCH_CPU_CYCLES] = { 0x3c, 0x00 },
+	[INTEL_ARCH_INSTRUCTIONS_RETIRED] = { 0xc0, 0x00 },
+	[INTEL_ARCH_REFERENCE_CYCLES] = { 0x3c, 0x01 },
+	[INTEL_ARCH_LLC_REFERENCES] = { 0x2e, 0x4f },
+	[INTEL_ARCH_LLC_MISSES] = { 0x2e, 0x41 },
+	[INTEL_ARCH_BRANCHES_RETIRED] = { 0xc4, 0x00 },
+	[INTEL_ARCH_BRANCHES_MISPREDICTED] = { 0xc5, 0x00 },
+	[PSEUDO_ARCH_REFERENCE_CYCLES] = { 0x00, 0x03 },
 };
 
 /* mapping between fixed pmc index and intel_arch_events array */
-static int fixed_pmc_events[] = {1, 0, 7};
+static int fixed_pmc_events[] = {
+	[0] = INTEL_ARCH_INSTRUCTIONS_RETIRED,
+	[1] = INTEL_ARCH_CPU_CYCLES,
+	[2] = PSEUDO_ARCH_REFERENCE_CYCLES,
+};
 
 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
@@ -80,16 +108,18 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc)
 	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
+	BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS);
+
+	/*
+	 * Disallow events reported as unavailable in guest CPUID. Note, this
+	 * doesn't apply to pseudo-architectural events.
+	 */
+	for (i = 0; i < NR_REAL_INTEL_ARCH_EVENTS; i++) {
 		if (intel_arch_events[i].eventsel != event_select ||
 		    intel_arch_events[i].unit_mask != unit_mask)
 			continue;
 
-		/* disable event that reported as not present by cpuid */
-		if ((i < 7) && !(pmu->available_event_types & (1 << i)))
-			return false;
-
-		break;
+		return pmu->available_event_types & BIT(i);
 	}
 
 	return true;
@@ -438,16 +468,17 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
 {
-	size_t size = ARRAY_SIZE(fixed_pmc_events);
-	struct kvm_pmc *pmc;
-	u32 event;
 	int i;
 
+	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);
+
 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-		pmc = &pmu->fixed_counters[i];
-		event = fixed_pmc_events[array_index_nospec(i, size)];
+		int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
+		struct kvm_pmc *pmc = &pmu->fixed_counters[index];
+		u32 event = fixed_pmc_events[index];
+
 		pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
-			intel_arch_events[event].eventsel;
+				intel_arch_events[event].eventsel;
 	}
 }
 
@@ -508,10 +539,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (pmu->version == 1) {
 		pmu->nr_arch_fixed_counters = 0;
 	} else {
-		pmu->nr_arch_fixed_counters =
-			min3(ARRAY_SIZE(fixed_pmc_events),
-			     (size_t) edx.split.num_counters_fixed,
-			     (size_t)kvm_pmu_cap.num_counters_fixed);
+		pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
+						    kvm_pmu_cap.num_counters_fixed);
 		edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
 						  kvm_pmu_cap.bit_width_fixed);
 		pmu->counter_bitmask[KVM_PMC_FIXED] =
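
The enum order matters because it mirrors the architectural event enumeration in CPUID leaf 0xA: EAX[31:24] reports how many bits of the EBX vector are valid, and a set bit at index i means event i is not available, which is what the guest-CPUID-derived pmu->available_event_types (consulted above via BIT(i)) reflects. As a rough user-space sketch, not part of the patch and assuming an x86 build with GCC/Clang's <cpuid.h> on hardware or a guest that exposes an architectural PMU, walking that bit vector in the same index order as the new enum could look like this:

#include <stdio.h>
#include <cpuid.h>

/* Names follow the enum order introduced by the patch. */
static const char * const arch_event_names[] = {
	"cpu cycles",
	"instructions retired",
	"reference cycles",
	"LLC references",
	"LLC misses",
	"branches retired",
	"branches mispredicted",
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx, len, i;

	/* CPUID leaf 0xA enumerates the architectural PMU; EAX[7:0] is the version. */
	if (!__get_cpuid_count(0xa, 0, &eax, &ebx, &ecx, &edx) || !(eax & 0xff)) {
		puts("no architectural PMU enumerated");
		return 1;
	}

	/* EAX[31:24] is the number of valid bits in the EBX event vector. */
	len = (eax >> 24) & 0xff;

	/* A set bit in EBX means the event at that index is NOT available. */
	for (i = 0; i < len && i < 7; i++)
		printf("event %u (%s): %savailable\n", i, arch_event_names[i],
		       (ebx & (1u << i)) ? "not " : "");
	return 0;
}

This also shows why the availability check in intel_hw_event_available() only covers the real architectural events: the pseudo TSC-reference-cycles event has no bit in the CPUID vector.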
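The fixed-counter setup, in turn, boils down to packing the unit mask into bits 15:8 and the event select into bits 7:0 of pmc->eventsel. A minimal standalone sketch of that encoding, not part of the patch, with the eventsel/unit-mask pairs copied from intel_arch_events[] according to the fixed_pmc_events[] mapping in the diff:

#include <stdio.h>

/* Pairs taken from intel_arch_events[], ordered per fixed_pmc_events[]. */
static const struct { unsigned char eventsel, unit_mask; } fixed_map[] = {
	{ 0xc0, 0x00 },	/* fixed ctr 0: INTEL_ARCH_INSTRUCTIONS_RETIRED */
	{ 0x3c, 0x00 },	/* fixed ctr 1: INTEL_ARCH_CPU_CYCLES */
	{ 0x00, 0x03 },	/* fixed ctr 2: PSEUDO_ARCH_REFERENCE_CYCLES */
};

int main(void)
{
	/* Same packing as setup_fixed_pmc_eventsel(): (unit_mask << 8) | eventsel. */
	for (int i = 0; i < 3; i++)
		printf("fixed counter %d: eventsel = 0x%04x\n", i,
		       (fixed_map[i].unit_mask << 8) | fixed_map[i].eventsel);
	return 0;
}

Run on its own this prints eventsel values of 0x00c0, 0x003c and 0x0300 for fixed counters 0, 1 and 2 respectively.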