@@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
 	highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
 
 	WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+	WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
 	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
-
+	WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
 	return 0;
 }
 
@@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
 	highest_perf = cppc_perf.highest_perf;
 
 	WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+	WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
 	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
 		   cppc_perf.lowest_nonlinear_perf);
 	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+	WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
 
 	if (cppc_state == AMD_PSTATE_ACTIVE)
 		return 0;
@@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
 	u64 value = prev;
 
+	min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+			cpudata->max_limit_perf);
+	max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+			cpudata->max_limit_perf);
 	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
 
 	if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
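The hunk above first confines the governor's requested min/max perf to the user's limit window and only then clamps the desired perf between them. As a rough illustration only (plain userspace C with made-up numbers; clamp_u32() is a stand-in for the kernel's clamp_t() and is not driver code):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for clamp_t(): confine v to [lo, hi]. */
static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint32_t min_limit_perf = 60, max_limit_perf = 150;	/* user policy window */
	uint32_t min_perf = 30, max_perf = 228, des_perf = 200;	/* governor request */

	/* Same ordering as the driver: limits first, then the desired value. */
	min_perf = clamp_u32(min_perf, min_limit_perf, max_limit_perf);
	max_perf = clamp_u32(max_perf, min_limit_perf, max_limit_perf);
	des_perf = clamp_u32(des_perf, min_perf, max_perf);

	printf("min=%u max=%u des=%u\n", min_perf, max_perf, des_perf);	/* 60 150 150 */
	return 0;
}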
@@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
 	return 0;
 }
 
+static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+{
+	u32 max_limit_perf, min_limit_perf;
+	struct amd_cpudata *cpudata = policy->driver_data;
+
+	max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+	min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+	WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+	WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+	WRITE_ONCE(cpudata->max_limit_freq, policy->max);
+	WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+
+	return 0;
+}
+
 static int amd_pstate_update_freq(struct cpufreq_policy *policy,
 				  unsigned int target_freq, bool fast_switch)
 {
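The helper added above converts the policy's min/max frequency limits into CPPC perf units by scaling against highest_perf/max_freq with integer division (div_u64). A rough standalone illustration of that arithmetic, with made-up values and a hypothetical freq_to_perf() helper that is not part of the driver:

#include <stdio.h>
#include <stdint.h>

/* limit_perf = limit_freq * highest_perf / max_freq, truncating like div_u64(). */
static uint32_t freq_to_perf(uint64_t freq_khz, uint32_t highest_perf,
			     uint64_t max_freq_khz)
{
	return (uint32_t)(freq_khz * highest_perf / max_freq_khz);
}

int main(void)
{
	uint32_t highest_perf = 228;	/* hypothetical CPPC highest perf */
	uint64_t max_freq = 4500000;	/* kHz, hypothetical */

	/* policy->max = 3.0 GHz -> 152, policy->min = 0.4 GHz -> 20 */
	printf("max_limit_perf=%u min_limit_perf=%u\n",
	       freq_to_perf(3000000, highest_perf, max_freq),
	       freq_to_perf(400000, highest_perf, max_freq));
	return 0;
}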
@@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
 	if (!cpudata->max_freq)
 		return -ENODEV;
 
+	if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+		amd_pstate_update_min_max_limit(policy);
+
 	cap_perf = READ_ONCE(cpudata->highest_perf);
 	min_perf = READ_ONCE(cpudata->lowest_perf);
 	max_perf = cap_perf;
@@ -534,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
 	struct amd_cpudata *cpudata = policy->driver_data;
 	unsigned int target_freq;
 
+	if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+		amd_pstate_update_min_max_limit(policy);
+
+
 	cap_perf = READ_ONCE(cpudata->highest_perf);
 	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
 	max_freq = READ_ONCE(cpudata->max_freq);
@@ -747,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* Initial processor data capability frequencies */
 	cpudata->max_freq = max_freq;
 	cpudata->min_freq = min_freq;
+	cpudata->max_limit_freq = max_freq;
+	cpudata->min_limit_freq = min_freq;
 	cpudata->nominal_freq = nominal_freq;
 	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
 
@@ -1185,16 +1215,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void amd_pstate_epp_init(unsigned int cpu)
+static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	struct amd_cpudata *cpudata = policy->driver_data;
-	u32 max_perf, min_perf;
+	u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
 	u64 value;
 	s16 epp;
 
 	max_perf = READ_ONCE(cpudata->highest_perf);
 	min_perf = READ_ONCE(cpudata->lowest_perf);
+	max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+	min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+	max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+			cpudata->max_limit_perf);
+	min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+			cpudata->max_limit_perf);
+
+	WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+	WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
 
 	value = READ_ONCE(cpudata->cppc_req_cached);
 
@@ -1212,9 +1251,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
 	value &= ~AMD_CPPC_DES_PERF(~0L);
 	value |= AMD_CPPC_DES_PERF(0);
 
-	if (cpudata->epp_policy == cpudata->policy)
-		goto skip_epp;
-
 	cpudata->epp_policy = cpudata->policy;
 
 	/* Get BIOS pre-defined epp value */
@@ -1224,7 +1260,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
 		 * This return value can only be negative for shared_memory
 		 * systems where EPP register read/write not supported.
 		 */
-		goto skip_epp;
+		return;
 	}
 
 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1238,8 +1274,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
 
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
 	amd_pstate_set_epp(cpudata, epp);
-skip_epp:
-	cpufreq_cpu_put(policy);
 }
 
 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1254,7 +1288,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 
 	cpudata->policy = policy->policy;
 
-	amd_pstate_epp_init(policy->cpu);
+	amd_pstate_epp_update_limit(policy);
 
 	return 0;
 }