@@ -222,25 +222,47 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata)
 }
 
 static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
-			   u32 des_perf, u32 max_perf, bool fast_switch)
+			   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
 {
+	u64 value, prev;
+
+	value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+		   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+	if (value == prev)
+		return 0;
+
 	if (fast_switch) {
-		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+		wrmsrl(MSR_AMD_CPPC_REQ, value);
 		return 0;
+	} else {
+		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+
+		if (ret)
+			return ret;
 	}
 
-	return wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
-			     READ_ONCE(cpudata->cppc_req_cached));
+	WRITE_ONCE(cpudata->cppc_req_cached, value);
+	WRITE_ONCE(cpudata->epp_cached, epp);
+
+	return 0;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
 static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
 					 u32 min_perf, u32 des_perf,
-					 u32 max_perf, bool fast_switch)
+					 u32 max_perf, u32 epp,
+					 bool fast_switch)
 {
 	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
-						   max_perf, fast_switch);
+						   max_perf, epp, fast_switch);
 }
 
 static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
@@ -261,6 +283,7 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
 		return ret;
 	}
 
+	/* update both so that msr_update_perf() can effectively check */
 	WRITE_ONCE(cpudata->epp_cached, epp);
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
 
@@ -459,12 +482,18 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 	return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata,
-			     u32 min_perf, u32 des_perf,
-			     u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+			     u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
 {
 	struct cppc_perf_ctrls perf_ctrls;
 
+	if (cppc_state == AMD_PSTATE_ACTIVE) {
+		int ret = shmem_set_epp(cpudata, epp);
+
+		if (ret)
+			return ret;
+	}
+
 	perf_ctrls.max_perf = max_perf;
 	perf_ctrls.min_perf = min_perf;
 	perf_ctrls.desired_perf = des_perf;
@@ -510,9 +539,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 {
 	unsigned long max_freq;
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
-	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
 	u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
-	u64 value = prev;
 
 	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
 
@@ -528,27 +555,14 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 	if (!cpudata->boost_supported)
 		max_perf = min_t(unsigned long, nominal_perf, max_perf);
 
-	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
-		   AMD_CPPC_DES_PERF_MASK);
-	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
-	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
-	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
-
 	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
 		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
 			cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
-			cpudata->cpu, (value != prev), fast_switch);
+			cpudata->cpu, fast_switch);
 	}
 
-	if (value == prev)
-		goto cpufreq_policy_put;
+	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
 
-	WRITE_ONCE(cpudata->cppc_req_cached, value);
-
-	amd_pstate_update_perf(cpudata, min_perf, des_perf,
-			       max_perf, fast_switch);
-
-cpufreq_policy_put:
 	cpufreq_cpu_put(policy);
 }
 
@@ -1544,36 +1558,24 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
 static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
-	u64 value;
+	u32 epp;
 
 	amd_pstate_update_min_max_limit(policy);
 
-	value = READ_ONCE(cpudata->cppc_req_cached);
-
-	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
-		   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
-	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, cpudata->max_limit_perf);
-	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, 0);
-	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, cpudata->min_limit_perf);
-
 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
-		WRITE_ONCE(cpudata->epp_cached, 0);
-	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, cpudata->epp_cached);
-
-	WRITE_ONCE(cpudata->cppc_req_cached, value);
+		epp = 0;
+	else
+		epp = READ_ONCE(cpudata->epp_cached);
 
 	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
-					  cpudata->epp_cached,
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
 					  cpudata->min_limit_perf,
 					  cpudata->max_limit_perf,
 					  policy->boost_enabled);
 	}
 
-	amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
-			       cpudata->max_limit_perf, false);
-
-	return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
+	return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+				      cpudata->max_limit_perf, epp, false);
 }
 
 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1602,7 +1604,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 {
 	u64 max_perf;
 	int ret;
@@ -1620,17 +1622,19 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 					  max_perf, cpudata->boost_state);
 	}
 
-	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
-	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
+	return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
 }
 
 static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	int ret;
 
 	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
 
-	amd_pstate_epp_reenable(cpudata);
+	ret = amd_pstate_epp_reenable(cpudata);
+	if (ret)
+		return ret;
 	cpudata->suspended = false;
 
 	return 0;
@@ -1654,10 +1658,8 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 					  min_perf, min_perf, policy->boost_enabled);
 	}
 
-	amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
-	amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
-
-	return 0;
+	return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+				      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
 }
 
 static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
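
For readers following the change: the pattern this patch settles on in msr_update_perf() is a read-modify-write of the cached CPPC request word, a short-circuit when nothing changed, and a cache refresh only after the MSR write succeeds. Below is a minimal standalone C sketch of that pattern. The GENMASK64()/FIELD_PREP64() helpers and the assumed bit layout (max[7:0], min[15:8], des[23:16], epp[31:24]) are illustrative stand-ins for the kernel's GENMASK()/FIELD_PREP() and the AMD_CPPC_*_MASK definitions; they are not taken from this diff.

	/* Sketch of the build-compare-write pattern; layout is an assumption. */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	#define GENMASK64(h, l)    (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
	#define FIELD_PREP64(m, v) (((uint64_t)(v) << __builtin_ctzll(m)) & (m))

	#define CPPC_MAX_PERF_MASK GENMASK64(7, 0)   /* assumed field layout */
	#define CPPC_MIN_PERF_MASK GENMASK64(15, 8)
	#define CPPC_DES_PERF_MASK GENMASK64(23, 16)
	#define CPPC_EPP_PERF_MASK GENMASK64(31, 24)

	/* Returns true when the cached request word actually changed. */
	static bool build_cppc_req(uint64_t *cached, uint32_t min_perf,
				   uint32_t des_perf, uint32_t max_perf,
				   uint32_t epp)
	{
		uint64_t value = *cached;

		/* Clear all four fields, then repack them, as the patch does. */
		value &= ~(CPPC_MAX_PERF_MASK | CPPC_MIN_PERF_MASK |
			   CPPC_DES_PERF_MASK | CPPC_EPP_PERF_MASK);
		value |= FIELD_PREP64(CPPC_MAX_PERF_MASK, max_perf);
		value |= FIELD_PREP64(CPPC_DES_PERF_MASK, des_perf);
		value |= FIELD_PREP64(CPPC_MIN_PERF_MASK, min_perf);
		value |= FIELD_PREP64(CPPC_EPP_PERF_MASK, epp);

		if (value == *cached)
			return false;	/* nothing to write: skip the MSR access */

		*cached = value;	/* in the driver: wrmsrl(), then WRITE_ONCE() */
		return true;
	}

	int main(void)
	{
		uint64_t cached = 0;

		printf("changed: %d, req: %#llx\n",
		       build_cppc_req(&cached, 10, 0, 255, 128),
		       (unsigned long long)cached);
		/* Same values again: the short-circuit avoids a redundant write. */
		printf("changed: %d\n", build_cppc_req(&cached, 10, 0, 255, 128));
		return 0;
	}

The second call prints changed: 0, mirroring how msr_update_perf() now skips the wrmsrl() entirely when the requested min/des/max/epp already match cppc_req_cached, which is also why msr_set_epp() must keep both caches in sync.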