|
22 | 22 |
|
23 | 23 | #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
|
24 | 24 |
|
/*
 * Index space for Intel's architectural PMU events.  The ordering is ABI:
 * CPUID.0xA.EBX enumerates support for each real event by its index in this
 * enum, so entries must never be reordered or renumbered.
 */
enum intel_pmu_architectural_events {
	INTEL_ARCH_CPU_CYCLES,
	INTEL_ARCH_INSTRUCTIONS_RETIRED,
	INTEL_ARCH_REFERENCE_CYCLES,
	INTEL_ARCH_LLC_REFERENCES,
	INTEL_ARCH_LLC_MISSES,
	INTEL_ARCH_BRANCHES_RETIRED,
	INTEL_ARCH_BRANCHES_MISPREDICTED,

	/* Count of events that CPUID can actually enumerate. */
	NR_REAL_INTEL_ARCH_EVENTS,

	/*
	 * Pseudo-architectural event backing IA32_FIXED_CTR2, a.k.a. TSC
	 * reference cycles.  The real architectural "reference cycles" event
	 * may tick off the core crystal clock or the bus clock rather than
	 * the TSC (so much for "architectural"), hence the separate pseudo
	 * event for the fixed counter.
	 */
	PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
	NR_INTEL_ARCH_EVENTS,
};
| 49 | + |
25 | 50 | static struct {
|
26 | 51 | u8 eventsel;
|
27 | 52 | u8 unit_mask;
|
28 | 53 | } const intel_arch_events[] = {
|
29 |
| - [0] = { 0x3c, 0x00 }, |
30 |
| - [1] = { 0xc0, 0x00 }, |
31 |
| - [2] = { 0x3c, 0x01 }, |
32 |
| - [3] = { 0x2e, 0x4f }, |
33 |
| - [4] = { 0x2e, 0x41 }, |
34 |
| - [5] = { 0xc4, 0x00 }, |
35 |
| - [6] = { 0xc5, 0x00 }, |
36 |
| - /* The above index must match CPUID 0x0A.EBX bit vector */ |
37 |
| - [7] = { 0x00, 0x03 }, |
| 54 | + [INTEL_ARCH_CPU_CYCLES] = { 0x3c, 0x00 }, |
| 55 | + [INTEL_ARCH_INSTRUCTIONS_RETIRED] = { 0xc0, 0x00 }, |
| 56 | + [INTEL_ARCH_REFERENCE_CYCLES] = { 0x3c, 0x01 }, |
| 57 | + [INTEL_ARCH_LLC_REFERENCES] = { 0x2e, 0x4f }, |
| 58 | + [INTEL_ARCH_LLC_MISSES] = { 0x2e, 0x41 }, |
| 59 | + [INTEL_ARCH_BRANCHES_RETIRED] = { 0xc4, 0x00 }, |
| 60 | + [INTEL_ARCH_BRANCHES_MISPREDICTED] = { 0xc5, 0x00 }, |
| 61 | + [PSEUDO_ARCH_REFERENCE_CYCLES] = { 0x00, 0x03 }, |
38 | 62 | };
|
39 | 63 |
|
40 | 64 | /* mapping between fixed pmc index and intel_arch_events array */
|
41 |
| -static int fixed_pmc_events[] = {1, 0, 7}; |
| 65 | +static int fixed_pmc_events[] = { |
| 66 | + [0] = INTEL_ARCH_INSTRUCTIONS_RETIRED, |
| 67 | + [1] = INTEL_ARCH_CPU_CYCLES, |
| 68 | + [2] = PSEUDO_ARCH_REFERENCE_CYCLES, |
| 69 | +}; |
42 | 70 |
|
43 | 71 | static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
|
44 | 72 | {
|
@@ -80,13 +108,16 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc)
|
80 | 108 | u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
|
81 | 109 | int i;
|
82 | 110 |
|
83 |
| - for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) { |
| 111 | + BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS); |
| 112 | + |
| 113 | + for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) { |
84 | 114 | if (intel_arch_events[i].eventsel != event_select ||
|
85 | 115 | intel_arch_events[i].unit_mask != unit_mask)
|
86 | 116 | continue;
|
87 | 117 |
|
88 | 118 | /* disable event that reported as not present by cpuid */
|
89 |
| - if ((i < 7) && !(pmu->available_event_types & (1 << i))) |
| 119 | + if ((i < PSEUDO_ARCH_REFERENCE_CYCLES) && |
| 120 | + !(pmu->available_event_types & (1 << i))) |
90 | 121 | return false;
|
91 | 122 |
|
92 | 123 | break;
|
|
0 commit comments