|
17 | 17 | /* HW holds 8 characters + one for the NUL terminator */
|
18 | 18 | #define ARCPMU_EVENT_NAME_LEN 9
|
19 | 19 |
|
| 20 | +/* |
| 21 | + * Some ARC pct quirks: |
| 22 | + * |
| 23 | + * PERF_COUNT_HW_STALLED_CYCLES_BACKEND |
| 24 | + * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND |
| 25 | + * The ARC 700 can either measure stalls per pipeline stage or all |
| 26 | + * stalls combined; for now we assign all stalls to STALLED_CYCLES_BACKEND |
| 27 | + * and all pipeline flushes (e.g. caused by branch mispredicts) to |
| 28 | + * STALLED_CYCLES_FRONTEND. |
| 29 | + * |
| 30 | + * We could start multiple performance counters and combine the results |
| 31 | + * afterwards, but that would complicate things. |
| 32 | + * |
| 33 | + * Note that I$ cache misses aren't counted by either of the two! |
| 34 | + */ |
| 35 | + |
| 36 | +/* |
| 37 | + * ARC PCT has hardware conditions with fixed "names" but variable |
| 38 | + * "indexes" (based on a specific RTL build). |
| 39 | + * Below is the static map between the perf generic/ARC specific |
| 40 | + * event_id and the h/w condition names. |
| 41 | + * At probe time, we loop through each index and read its name to |
| 42 | + * complete the mapping of perf event_id to h/w index, as the latter |
| 43 | + * is what is actually programmed into the counter. |
| 44 | + */ |
| 45 | +static const char * const arc_pmu_ev_hw_map[] = { |
| 46 | + /* count cycles */ |
| 47 | + [PERF_COUNT_HW_CPU_CYCLES] = "crun", |
| 48 | + [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun", |
| 49 | + [PERF_COUNT_HW_BUS_CYCLES] = "crun", |
| 50 | + |
| 51 | + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush", |
| 52 | + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall", |
| 53 | + |
| 54 | + /* counts conditions */ |
| 55 | + [PERF_COUNT_HW_INSTRUCTIONS] = "iall", |
| 56 | + /* All jump instructions that are taken */ |
| 57 | + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", |
| 58 | +#ifdef CONFIG_ISA_ARCV2 |
| 59 | + [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", |
| 60 | +#else |
| 61 | + [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ |
| 62 | + [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */ |
| 63 | +#endif |
| 64 | + [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */ |
| 65 | + [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */ |
| 66 | + |
| 67 | + [PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */ |
| 68 | + [PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */ |
| 69 | + [PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */ |
| 70 | + [PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */ |
| 71 | + [PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */ |
| 72 | + |
| 73 | + [PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc", /* Instr: mem read cached */ |
| 74 | + [PERF_COUNT_HW_CACHE_MISSES] = "dclm", /* D-cache Load Miss */ |
| 75 | +}; |
| 76 | + |
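
As an aside, the probe-time name matching described in the comment above can be sketched roughly as below. This is a minimal sketch, assuming the aux registers ARC_REG_CC_INDEX / ARC_REG_CC_NAME0 / ARC_REG_CC_NAME1 and a per-driver ev_hw_idx[] array, none of which appear in this hunk:

/*
 * Sketch only: walk every h/w condition index, read back its 8-char
 * name, and record the index for any event_id whose name matches the
 * static arc_pmu_ev_hw_map[] table above.
 */
static void arc_pmu_map_hw_events(int n_conditions)
{
	union {
		u32 word[2];
		char str[ARCPMU_EVENT_NAME_LEN];
	} cc_name;
	int i, j;

	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = '\0';

	for (j = 0; j < n_conditions; j++) {
		/* select h/w condition j, then read its name registers */
		write_aux_reg(ARC_REG_CC_INDEX, j);
		cc_name.word[0] = read_aux_reg(ARC_REG_CC_NAME0);
		cc_name.word[1] = read_aux_reg(ARC_REG_CC_NAME1);

		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
			if (arc_pmu_ev_hw_map[i] &&
			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str))
				arc_pmu->ev_hw_idx[i] = j; /* assumed field */
		}
	}
}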
| 77 | +#define C(_x) PERF_COUNT_HW_CACHE_##_x |
| 78 | +#define CACHE_OP_UNSUPPORTED 0xffff |
| 79 | + |
| 80 | +static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
| 81 | + [C(L1D)] = { |
| 82 | + [C(OP_READ)] = { |
| 83 | + [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC, |
| 84 | + [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM, |
| 85 | + }, |
| 86 | + [C(OP_WRITE)] = { |
| 87 | + [C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC, |
| 88 | + [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM, |
| 89 | + }, |
| 90 | + [C(OP_PREFETCH)] = { |
| 91 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 92 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 93 | + }, |
| 94 | + }, |
| 95 | + [C(L1I)] = { |
| 96 | + [C(OP_READ)] = { |
| 97 | + [C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS, |
| 98 | + [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM, |
| 99 | + }, |
| 100 | + [C(OP_WRITE)] = { |
| 101 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 102 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 103 | + }, |
| 104 | + [C(OP_PREFETCH)] = { |
| 105 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 106 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 107 | + }, |
| 108 | + }, |
| 109 | + [C(LL)] = { |
| 110 | + [C(OP_READ)] = { |
| 111 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 112 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 113 | + }, |
| 114 | + [C(OP_WRITE)] = { |
| 115 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 116 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 117 | + }, |
| 118 | + [C(OP_PREFETCH)] = { |
| 119 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 120 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 121 | + }, |
| 122 | + }, |
| 123 | + [C(DTLB)] = { |
| 124 | + [C(OP_READ)] = { |
| 125 | + [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC, |
| 126 | + [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB, |
| 127 | + }, |
| 128 | + /* DTLB LD/ST Miss not segregated by h/w */ |
| 129 | + [C(OP_WRITE)] = { |
| 130 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 131 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 132 | + }, |
| 133 | + [C(OP_PREFETCH)] = { |
| 134 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 135 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 136 | + }, |
| 137 | + }, |
| 138 | + [C(ITLB)] = { |
| 139 | + [C(OP_READ)] = { |
| 140 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 141 | + [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB, |
| 142 | + }, |
| 143 | + [C(OP_WRITE)] = { |
| 144 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 145 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 146 | + }, |
| 147 | + [C(OP_PREFETCH)] = { |
| 148 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 149 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 150 | + }, |
| 151 | + }, |
| 152 | + [C(BPU)] = { |
| 153 | + [C(OP_READ)] = { |
| 154 | + [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS, |
| 155 | + [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES, |
| 156 | + }, |
| 157 | + [C(OP_WRITE)] = { |
| 158 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 159 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 160 | + }, |
| 161 | + [C(OP_PREFETCH)] = { |
| 162 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 163 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 164 | + }, |
| 165 | + }, |
| 166 | + [C(NODE)] = { |
| 167 | + [C(OP_READ)] = { |
| 168 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 169 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 170 | + }, |
| 171 | + [C(OP_WRITE)] = { |
| 172 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 173 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 174 | + }, |
| 175 | + [C(OP_PREFETCH)] = { |
| 176 | + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
| 177 | + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
| 178 | + }, |
| 179 | + }, |
| 180 | +}; |
| 181 | + |
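The table above is consulted when user space opens a PERF_TYPE_HW_CACHE event, whose config packs (type, op, result) into one u64 per the perf ABI. A minimal sketch of the decode; the helper name arc_pmu_cache_event() is an assumption, not taken from this hunk:

static int arc_pmu_cache_event(u64 config)
{
	unsigned int type, op, result;
	int ret;

	/* perf ABI: config = type | (op << 8) | (result << 16) */
	type   = config & 0xff;
	op     = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[type][op][result];
	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	/* an event_id resolvable via arc_pmu_ev_hw_map[] above */
	return ret;
}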
20 | 182 | enum arc_pmu_attr_groups {
|
21 | 183 | ARCPMU_ATTR_GR_EVENTS,
|
22 | 184 | ARCPMU_ATTR_GR_FORMATS,
|
@@ -328,7 +490,7 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
|
328 | 490 | }
|
329 | 491 |
|
330 | 492 | if (!(event->hw.state & PERF_HES_STOPPED)) {
|
331 |
| - /* stop ARC pmu here */ |
| 493 | + /* stop hw counter here */ |
332 | 494 | write_aux_reg(ARC_REG_PCT_INDEX, idx);
|
333 | 495 |
|
334 | 496 | /* condition code #0 is always "never" */
|
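For context, the surrounding stop path plausibly reads as follows; the ARC_REG_PCT_CONFIG write and the PERF_HES_STOPPED bookkeeping are inferred from the visible comments and common perf conventions, not shown in the diff:

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* stop hw counter here */
		write_aux_reg(ARC_REG_PCT_INDEX, idx);

		/* condition code #0 is always "never", so counting freezes */
		write_aux_reg(ARC_REG_PCT_CONFIG, 0);

		event->hw.state |= PERF_HES_STOPPED;
	}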
@@ -361,7 +523,7 @@ static int arc_pmu_add(struct perf_event *event, int flags)
|
361 | 523 | {
|
362 | 524 | struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
|
363 | 525 | struct hw_perf_event *hwc = &event->hw;
|
364 |
| - int idx = hwc->idx; |
| 526 | + int idx; |
365 | 527 |
|
366 | 528 | idx = ffz(pmu_cpu->used_mask[0]);
|
367 | 529 | if (idx == arc_pmu->n_counters)
|
|
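The ffz() call above scans used_mask for the first clear bit, i.e. the lowest-numbered free counter, which is why a result equal to n_counters means every counter is busy. A sketch of the rest of the allocation idiom; the __set_bit() claim and the hwc->idx bookkeeping are assumptions, since the hunk is truncated:

	idx = ffz(pmu_cpu->used_mask[0]);	/* first free counter index */
	if (idx == arc_pmu->n_counters)
		return -EAGAIN;			/* all counters in use */

	__set_bit(idx, pmu_cpu->used_mask);	/* claim the slot */
	hwc->idx = idx;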