@@ -186,29 +186,28 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
186
186
/*
 * NOTE(review): lock usage is not visible in this chunk; the names suggest
 * limits_lock serializes min/max limit updates and driver_lock serializes
 * driver-wide mode transitions — confirm against the full file.
 */
static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
189
- static s16 msr_get_epp (struct amd_cpudata * cpudata , u64 cppc_req_cached )
189
+ static s16 msr_get_epp (struct amd_cpudata * cpudata )
190
190
{
191
+ u64 value ;
191
192
int ret ;
192
193
193
- if (!cppc_req_cached ) {
194
- ret = rdmsrl_on_cpu (cpudata -> cpu , MSR_AMD_CPPC_REQ , & cppc_req_cached );
195
- if (ret < 0 ) {
196
- pr_debug ("Could not retrieve energy perf value (%d)\n" , ret );
197
- return ret ;
198
- }
194
+ ret = rdmsrl_on_cpu (cpudata -> cpu , MSR_AMD_CPPC_REQ , & value );
195
+ if (ret < 0 ) {
196
+ pr_debug ("Could not retrieve energy perf value (%d)\n" , ret );
197
+ return ret ;
199
198
}
200
199
201
- return FIELD_GET (AMD_CPPC_EPP_PERF_MASK , cppc_req_cached );
200
+ return FIELD_GET (AMD_CPPC_EPP_PERF_MASK , value );
202
201
}
203
202
204
203
DEFINE_STATIC_CALL (amd_pstate_get_epp , msr_get_epp );
205
204
206
- static inline s16 amd_pstate_get_epp (struct amd_cpudata * cpudata , u64 cppc_req_cached )
205
+ static inline s16 amd_pstate_get_epp (struct amd_cpudata * cpudata )
207
206
{
208
- return static_call (amd_pstate_get_epp )(cpudata , cppc_req_cached );
207
+ return static_call (amd_pstate_get_epp )(cpudata );
209
208
}
210
209
211
- static s16 shmem_get_epp (struct amd_cpudata * cpudata , u64 dummy )
210
+ static s16 shmem_get_epp (struct amd_cpudata * cpudata )
212
211
{
213
212
u64 epp ;
214
213
int ret ;
@@ -222,35 +221,6 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata, u64 dummy)
222
221
return (s16 )(epp & 0xff );
223
222
}
224
223
225
- static int amd_pstate_get_energy_pref_index (struct amd_cpudata * cpudata )
226
- {
227
- s16 epp ;
228
- int index = - EINVAL ;
229
-
230
- epp = amd_pstate_get_epp (cpudata , 0 );
231
- if (epp < 0 )
232
- return epp ;
233
-
234
- switch (epp ) {
235
- case AMD_CPPC_EPP_PERFORMANCE :
236
- index = EPP_INDEX_PERFORMANCE ;
237
- break ;
238
- case AMD_CPPC_EPP_BALANCE_PERFORMANCE :
239
- index = EPP_INDEX_BALANCE_PERFORMANCE ;
240
- break ;
241
- case AMD_CPPC_EPP_BALANCE_POWERSAVE :
242
- index = EPP_INDEX_BALANCE_POWERSAVE ;
243
- break ;
244
- case AMD_CPPC_EPP_POWERSAVE :
245
- index = EPP_INDEX_POWERSAVE ;
246
- break ;
247
- default :
248
- break ;
249
- }
250
-
251
- return index ;
252
- }
253
-
254
224
static int msr_update_perf (struct amd_cpudata * cpudata , u32 min_perf ,
255
225
u32 des_perf , u32 max_perf , bool fast_switch )
256
226
{
@@ -275,19 +245,23 @@ static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
275
245
276
246
/*
 * msr_set_epp - program a new EPP value into the CPPC request MSR
 * @cpudata: per-CPU amd-pstate data
 * @epp: energy performance preference value to program
 *
 * Merges @epp into the cached MSR_AMD_CPPC_REQ image and writes it to
 * @cpudata's CPU, skipping the (slow) cross-CPU MSR write when the value
 * would not change.  On success the cached EPP and request-register
 * copies are updated.
 *
 * Return: 0 on success (or no-op), negative error code on MSR write
 * failure.
 */
static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
	u64 prev, value;
	int ret;

	prev = READ_ONCE(cpudata->cppc_req_cached);
	value = (prev & ~AMD_CPPC_EPP_PERF_MASK) |
		FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);

	/* Nothing to do if the requested EPP is already programmed. */
	if (value == prev)
		return 0;

	ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	if (ret) {
		pr_err("failed to set energy perf value (%d)\n", ret);
		return ret;
	}

	/* Keep the cached copies in sync with the hardware state. */
	WRITE_ONCE(cpudata->epp_cached, epp);
	WRITE_ONCE(cpudata->cppc_req_cached, value);

	return ret;
}
@@ -305,13 +279,16 @@ static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
305
279
int ret ;
306
280
struct cppc_perf_ctrls perf_ctrls ;
307
281
282
+ if (epp == cpudata -> epp_cached )
283
+ return 0 ;
284
+
308
285
perf_ctrls .energy_perf = epp ;
309
286
ret = cppc_set_epp_perf (cpudata -> cpu , & perf_ctrls , 1 );
310
287
if (ret ) {
311
288
pr_debug ("failed to set energy perf value (%d)\n" , ret );
312
289
return ret ;
313
290
}
314
- cpudata -> epp_cached = epp ;
291
+ WRITE_ONCE ( cpudata -> epp_cached , epp ) ;
315
292
316
293
return ret ;
317
294
}
@@ -1214,9 +1191,22 @@ static ssize_t show_energy_performance_preference(
1214
1191
struct amd_cpudata * cpudata = policy -> driver_data ;
1215
1192
int preference ;
1216
1193
1217
- preference = amd_pstate_get_energy_pref_index (cpudata );
1218
- if (preference < 0 )
1219
- return preference ;
1194
+ switch (cpudata -> epp_cached ) {
1195
+ case AMD_CPPC_EPP_PERFORMANCE :
1196
+ preference = EPP_INDEX_PERFORMANCE ;
1197
+ break ;
1198
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE :
1199
+ preference = EPP_INDEX_BALANCE_PERFORMANCE ;
1200
+ break ;
1201
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE :
1202
+ preference = EPP_INDEX_BALANCE_POWERSAVE ;
1203
+ break ;
1204
+ case AMD_CPPC_EPP_POWERSAVE :
1205
+ preference = EPP_INDEX_POWERSAVE ;
1206
+ break ;
1207
+ default :
1208
+ return - EINVAL ;
1209
+ }
1220
1210
1221
1211
return sysfs_emit (buf , "%s\n" , energy_perf_strings [preference ]);
1222
1212
}
@@ -1501,7 +1491,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
1501
1491
1502
1492
policy -> driver_data = cpudata ;
1503
1493
1504
- cpudata -> epp_cached = cpudata -> epp_default = amd_pstate_get_epp (cpudata , 0 );
1494
+ cpudata -> epp_cached = cpudata -> epp_default = amd_pstate_get_epp (cpudata );
1505
1495
1506
1496
policy -> min = policy -> cpuinfo .min_freq ;
1507
1497
policy -> max = policy -> cpuinfo .max_freq ;
@@ -1555,35 +1545,26 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
1555
1545
{
1556
1546
struct amd_cpudata * cpudata = policy -> driver_data ;
1557
1547
u64 value ;
1558
- s16 epp ;
1559
1548
1560
1549
amd_pstate_update_min_max_limit (policy );
1561
1550
1562
1551
value = READ_ONCE (cpudata -> cppc_req_cached );
1563
1552
1564
1553
value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
1565
- AMD_CPPC_DES_PERF_MASK );
1554
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK );
1566
1555
value |= FIELD_PREP (AMD_CPPC_MAX_PERF_MASK , cpudata -> max_limit_perf );
1567
1556
value |= FIELD_PREP (AMD_CPPC_DES_PERF_MASK , 0 );
1568
1557
value |= FIELD_PREP (AMD_CPPC_MIN_PERF_MASK , cpudata -> min_limit_perf );
1569
1558
1570
- /* Get BIOS pre-defined epp value */
1571
- epp = amd_pstate_get_epp (cpudata , value );
1572
- if (epp < 0 ) {
1573
- /**
1574
- * This return value can only be negative for shared_memory
1575
- * systems where EPP register read/write not supported.
1576
- */
1577
- return epp ;
1578
- }
1579
-
1580
1559
if (cpudata -> policy == CPUFREQ_POLICY_PERFORMANCE )
1581
- epp = 0 ;
1560
+ WRITE_ONCE (cpudata -> epp_cached , 0 );
1561
+ value |= FIELD_PREP (AMD_CPPC_EPP_PERF_MASK , cpudata -> epp_cached );
1582
1562
1583
1563
WRITE_ONCE (cpudata -> cppc_req_cached , value );
1584
1564
1585
1565
if (trace_amd_pstate_epp_perf_enabled ()) {
1586
- trace_amd_pstate_epp_perf (cpudata -> cpu , cpudata -> highest_perf , epp ,
1566
+ trace_amd_pstate_epp_perf (cpudata -> cpu , cpudata -> highest_perf ,
1567
+ cpudata -> epp_cached ,
1587
1568
cpudata -> min_limit_perf ,
1588
1569
cpudata -> max_limit_perf ,
1589
1570
policy -> boost_enabled );
@@ -1592,7 +1573,7 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
1592
1573
amd_pstate_update_perf (cpudata , cpudata -> min_limit_perf , 0U ,
1593
1574
cpudata -> max_limit_perf , false);
1594
1575
1595
- return amd_pstate_set_epp (cpudata , epp );
1576
+ return amd_pstate_set_epp (cpudata , READ_ONCE ( cpudata -> epp_cached ) );
1596
1577
}
1597
1578
1598
1579
static int amd_pstate_epp_set_policy (struct cpufreq_policy * policy )
0 commit comments