@@ -514,6 +514,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 	unsigned long max_freq;
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
 	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
+	u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
 	u64 value = prev;
 
 	min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
@@ -536,6 +537,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 	value &= ~AMD_CPPC_DES_PERF(~0L);
 	value |= AMD_CPPC_DES_PERF(des_perf);
 
+	/* limit the max perf when the core performance boost feature is disabled */
+	if (!cpudata->boost_supported)
+		max_perf = min_t(unsigned long, nominal_perf, max_perf);
+
 	value &= ~AMD_CPPC_MAX_PERF(~0L);
 	value |= AMD_CPPC_MAX_PERF(max_perf);
 
@@ -679,43 +684,105 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
 	cpufreq_cpu_put(policy);
 }
 
-static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	struct cppc_perf_ctrls perf_ctrls;
+	u32 highest_perf, nominal_perf, nominal_freq, max_freq;
 	int ret;
 
-	if (!cpudata->boost_supported) {
-		pr_err("Boost mode is not supported by this processor or SBIOS\n");
-		return -EINVAL;
+	highest_perf = READ_ONCE(cpudata->highest_perf);
+	nominal_perf = READ_ONCE(cpudata->nominal_perf);
+	nominal_freq = READ_ONCE(cpudata->nominal_freq);
+	max_freq = READ_ONCE(cpudata->max_freq);
+
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+		value &= ~GENMASK_ULL(7, 0);
+		value |= on ? highest_perf : nominal_perf;
+		WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+	} else {
+		perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
+		ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+		if (ret) {
+			cpufreq_cpu_release(policy);
+			pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
+				 cpudata->cpu, ret);
+			return ret;
+		}
 	}
 
-	if (state)
-		policy->cpuinfo.max_freq = cpudata->max_freq;
-	else
-		policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
+	if (on)
+		policy->cpuinfo.max_freq = max_freq;
+	else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
+		policy->cpuinfo.max_freq = nominal_freq * 1000;
 
 	policy->max = policy->cpuinfo.max_freq;
 
-	ret = freq_qos_update_request(&cpudata->req[1],
-				      policy->cpuinfo.max_freq);
-	if (ret < 0)
-		return ret;
+	if (cppc_state == AMD_PSTATE_PASSIVE) {
+		ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
+		if (ret < 0)
+			pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
+	}
 
-	return 0;
+	return ret < 0 ? ret : 0;
 }
 
-static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
+static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
 {
-	u32 highest_perf, nominal_perf;
+	struct amd_cpudata *cpudata = policy->driver_data;
+	int ret;
 
-	highest_perf = READ_ONCE(cpudata->highest_perf);
-	nominal_perf = READ_ONCE(cpudata->nominal_perf);
+	if (!cpudata->boost_supported) {
+		pr_err("Boost mode is not supported by this processor or SBIOS\n");
+		return -EOPNOTSUPP;
+	}
+	mutex_lock(&amd_pstate_driver_lock);
+	ret = amd_pstate_cpu_boost_update(policy, state);
+	WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
+	policy->boost_enabled = !ret ? state : false;
+	refresh_frequency_limits(policy);
+	mutex_unlock(&amd_pstate_driver_lock);
 
-	if (highest_perf <= nominal_perf)
-		return;
+	return ret;
+}
 
-	cpudata->boost_supported = true;
+static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
+{
+	u64 boost_val;
+	int ret = -1;
+
+	/*
+	 * If the platform has no CPB support, or CPB is disabled, initialize the
+	 * driver's boost_enabled state to false; this is not an error for the cpufreq core.
+	 */
+	if (!cpu_feature_enabled(X86_FEATURE_CPB)) {
+		pr_debug_once("Boost CPB capabilities not present in the processor\n");
+		ret = 0;
+		goto exit_err;
+	}
+
+	/* at least one CPU supports CPB, even if others fail later on to set up */
 	current_pstate_driver->boost_enabled = true;
+
+	ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+	if (ret) {
+		pr_err_once("failed to read initial CPU boost state!\n");
+		ret = -EIO;
+		goto exit_err;
+	}
+
+	if (!(boost_val & MSR_K7_HWCR_CPB_DIS))
+		cpudata->boost_supported = true;
+
+	return 0;
+
+exit_err:
+	cpudata->boost_supported = false;
+	return ret;
 }
 
 static void amd_perf_ctl_reset(unsigned int cpu)
@@ -968,6 +1035,10 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 	if (ret)
 		goto free_cpudata1;
 
+	ret = amd_pstate_init_boost_support(cpudata);
+	if (ret)
+		goto free_cpudata1;
+
 	min_freq = READ_ONCE(cpudata->min_freq);
 	max_freq = READ_ONCE(cpudata->max_freq);
 
@@ -980,6 +1051,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.min_freq = min_freq;
 	policy->cpuinfo.max_freq = max_freq;
 
+	policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+
 	/* It will be updated by governor */
 	policy->cur = policy->cpuinfo.min_freq;
 
@@ -1005,7 +1078,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	policy->driver_data = cpudata;
 
-	amd_pstate_boost_init(cpudata);
 	if (!current_pstate_driver->adjust_perf)
 		current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
 
@@ -1420,6 +1492,10 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	if (ret)
 		goto free_cpudata1;
 
+	ret = amd_pstate_init_boost_support(cpudata);
+	if (ret)
+		goto free_cpudata1;
+
 	min_freq = READ_ONCE(cpudata->min_freq);
 	max_freq = READ_ONCE(cpudata->max_freq);
 
@@ -1435,6 +1511,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	policy->min = policy->cpuinfo.min_freq;
 	policy->max = policy->cpuinfo.max_freq;
 
+	policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+
 	/*
 	 * Set the policy to provide a valid fallback value in case
 	 * the default cpufreq governor is neither powersave nor performance.
@@ -1456,7 +1534,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 			return ret;
 		WRITE_ONCE(cpudata->cppc_cap1_cached, value);
 	}
-	amd_pstate_boost_init(cpudata);
 
 	return 0;
 
@@ -1718,6 +1795,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
 	.suspend	= amd_pstate_epp_suspend,
 	.resume		= amd_pstate_epp_resume,
 	.update_limits	= amd_pstate_update_limits,
+	.set_boost	= amd_pstate_set_boost,
 	.name		= "amd-pstate-epp",
 	.attr		= amd_pstate_epp_attr,
 };
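For context on the MSR fast path in amd_pstate_cpu_boost_update() above: it rewrites only the max_perf field of the cached CPPC request (the GENMASK_ULL(7, 0) clear suggests that field sits in bits 7:0) and then writes the result to MSR_AMD_CPPC_REQ, picking highest_perf when boost is enabled and nominal_perf otherwise. The stand-alone sketch below mirrors just that bit manipulation for illustration; the mask macro and helper name are hypothetical stand-ins, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for the kernel's GENMASK_ULL(7, 0) over the max_perf field */
#define CPPC_MAX_PERF_MASK 0xffULL

/* clear the cached request's max_perf field and substitute the chosen perf level */
static uint64_t cppc_req_set_max_perf(uint64_t cppc_req, int boost_on,
				      uint8_t highest_perf, uint8_t nominal_perf)
{
	cppc_req &= ~CPPC_MAX_PERF_MASK;
	cppc_req |= boost_on ? highest_perf : nominal_perf;
	return cppc_req;
}

int main(void)
{
	uint64_t req = 0xf0c8a4ffULL;	/* arbitrary example of a cached request value */

	printf("boost on:  %#llx\n",
	       (unsigned long long)cppc_req_set_max_perf(req, 1, 0xd2, 0xa4));
	printf("boost off: %#llx\n",
	       (unsigned long long)cppc_req_set_max_perf(req, 0, 0xd2, 0xa4));
	return 0;
}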