@@ -94,15 +94,40 @@ void __init intel_pmu_pebs_data_source_nhm(void)
 	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
 }
 
-void __init intel_pmu_pebs_data_source_skl(bool pmem)
+static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source)
 {
 	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);
 
-	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
-	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
-	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
-	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
-	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
+	data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
+	data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
+	data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
+	data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
+	data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
+}
+
+void __init intel_pmu_pebs_data_source_skl(bool pmem)
+{
+	__intel_pmu_pebs_data_source_skl(pmem, pebs_data_source);
+}
+
+static void __init intel_pmu_pebs_data_source_grt(u64 *data_source)
+{
+	data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
+	data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
+	data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
+}
+
+void __init intel_pmu_pebs_data_source_adl(void)
+{
+	u64 *data_source;
+
+	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;
+	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
+	__intel_pmu_pebs_data_source_skl(false, data_source);
+
+	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
+	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
+	intel_pmu_pebs_data_source_grt(data_source);
 }
 
 static u64 precise_store_data(u64 status)
@@ -198,7 +223,7 @@ u64 adl_latency_data_small(struct perf_event *event, u64 status)
 
 	dse.val = status;
 
-	val = pebs_data_source[dse.ld_dse];
+	val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse];
 
 	/*
 	 * For the atom core on ADL,
@@ -214,7 +239,7 @@ u64 adl_latency_data_small(struct perf_event *event, u64 status)
 	return val;
 }
 
-static u64 load_latency_data(u64 status)
+static u64 load_latency_data(struct perf_event *event, u64 status)
 {
 	union intel_x86_pebs_dse dse;
 	u64 val;
@@ -224,7 +249,7 @@ static u64 load_latency_data(u64 status)
 	/*
 	 * use the mapping table for bit 0-3
 	 */
-	val = pebs_data_source[dse.ld_dse];
+	val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse];
 
 	/*
 	 * Nehalem models do not support TLB, Lock infos
@@ -263,7 +288,7 @@ static u64 load_latency_data(u64 status)
 	return val;
 }
 
-static u64 store_latency_data(u64 status)
+static u64 store_latency_data(struct perf_event *event, u64 status)
 {
 	union intel_x86_pebs_dse dse;
 	u64 val;
@@ -273,7 +298,7 @@ static u64 store_latency_data(u64 status)
 	/*
 	 * use the mapping table for bit 0-3
 	 */
-	val = pebs_data_source[dse.st_lat_dse];
+	val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse];
 
 	pebs_set_tlb_lock(&val, dse.st_lat_stlb_miss, dse.st_lat_locked);
 
@@ -1459,9 +1484,9 @@ static u64 get_data_src(struct perf_event *event, u64 aux)
 	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
 
 	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
-		val = load_latency_data(aux);
+		val = load_latency_data(event, aux);
 	else if (fl & PERF_X86_EVENT_PEBS_STLAT)
-		val = store_latency_data(aux);
+		val = store_latency_data(event, aux);
 	else if (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID)
 		val = x86_pmu.pebs_latency_data(event, aux);
 	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
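
The change above hinges on indexing a per-hybrid-PMU copy of the mapping table via hybrid_var(event->pmu, pebs_data_source) rather than the single global array, with intel_pmu_pebs_data_source_adl() populating a core-flavored and an atom-flavored copy. The standalone program below is a minimal sketch of that dispatch pattern only, not the kernel macro; every demo_* identifier and the placeholder encoding are invented for illustration.

/*
 * Illustrative sketch: a per-PMU PEBS data-source table that shadows a
 * global one, in the spirit of the hybrid_var() lookups in the patch.
 * All demo_* names are hypothetical, not kernel identifiers.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_PEBS_DATA_SOURCE_MAX 16

/* Global (non-hybrid) mapping table, playing the role of pebs_data_source[]. */
static uint64_t demo_global_data_source[DEMO_PEBS_DATA_SOURCE_MAX];

/* Per-PMU state: each hybrid PMU (core vs. atom) carries its own copy. */
struct demo_hybrid_pmu {
	uint64_t pebs_data_source[DEMO_PEBS_DATA_SOURCE_MAX];
};

static struct demo_hybrid_pmu demo_core_pmu, demo_atom_pmu;

/*
 * Pick the table for a given PMU: use the per-PMU copy when one is
 * supplied, otherwise fall back to the global table.
 */
static uint64_t *demo_data_source(struct demo_hybrid_pmu *pmu)
{
	return pmu ? pmu->pebs_data_source : demo_global_data_source;
}

int main(void)
{
	/*
	 * Start both per-PMU tables from the common baseline, then let the
	 * atom-specific setup override selected entries, much as the GRT
	 * helper rewrites entries 0x05/0x06/0x08 in the patch.
	 */
	memcpy(demo_core_pmu.pebs_data_source, demo_global_data_source,
	       sizeof(demo_global_data_source));
	memcpy(demo_atom_pmu.pebs_data_source, demo_global_data_source,
	       sizeof(demo_global_data_source));
	demo_atom_pmu.pebs_data_source[0x08] = 0xabc;	/* placeholder encoding */

	/* A decode path indexes whichever table matches the event's PMU. */
	printf("atom 0x08 -> %#llx\n",
	       (unsigned long long)demo_data_source(&demo_atom_pmu)[0x08]);
	printf("core 0x08 -> %#llx\n",
	       (unsigned long long)demo_data_source(&demo_core_pmu)[0x08]);
	return 0;
}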