 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
+#include <linux/bitfield.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -88,6 +89,11 @@ static bool cppc_enabled;
 static bool amd_pstate_prefcore = true;
 static struct quirk_entry *quirks;
 
+#define AMD_CPPC_MAX_PERF_MASK		GENMASK(7, 0)
+#define AMD_CPPC_MIN_PERF_MASK		GENMASK(15, 8)
+#define AMD_CPPC_DES_PERF_MASK		GENMASK(23, 16)
+#define AMD_CPPC_EPP_PERF_MASK		GENMASK(31, 24)
+
 /*
  * AMD Energy Preference Performance (EPP)
  * The EPP is used in the CCLK DPM controller to drive
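
As a quick, self-contained illustration of what the new helpers do (an editor's sketch, not part of the patch): the block below reimplements GENMASK/FIELD_PREP/FIELD_GET in plain userspace C and checks that the mask-based EPP access matches the open-coded "shift by 24, mask with 0xFF" pattern that the later hunks remove. The stand-in macro definitions are simplified assumptions; the kernel's versions in <linux/bits.h> and <linux/bitfield.h> additionally enforce compile-time-constant masks.

/*
 * Standalone userspace sketch (NOT kernel code): simplified stand-ins for
 * GENMASK/FIELD_PREP/FIELD_GET so the bit layout used by the patch can be
 * checked on its own.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		((~0ULL >> (63 - (h))) & (~0ULL << (l)))
#define FIELD_PREP(mask, val)	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctzll(mask))

/* Same field layout as the masks added in the hunk above (MSR_AMD_CPPC_REQ). */
#define AMD_CPPC_MAX_PERF_MASK	GENMASK(7, 0)
#define AMD_CPPC_MIN_PERF_MASK	GENMASK(15, 8)
#define AMD_CPPC_DES_PERF_MASK	GENMASK(23, 16)
#define AMD_CPPC_EPP_PERF_MASK	GENMASK(31, 24)

int main(void)
{
	uint64_t value = 0x11223344;	/* arbitrary cached request value */
	uint32_t epp = 0x80;

	/* Old style: open-coded shift for bits 31:24. */
	uint64_t old_req = (value & ~GENMASK(31, 24)) | ((uint64_t)epp << 24);

	/* New style: the same update expressed through the named mask. */
	uint64_t new_req = (value & ~AMD_CPPC_EPP_PERF_MASK) |
			   FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);

	assert(old_req == new_req);
	assert(FIELD_GET(AMD_CPPC_EPP_PERF_MASK, new_req) == epp);
	assert(FIELD_GET(AMD_CPPC_MIN_PERF_MASK, new_req) == ((new_req >> 8) & 0xFF));

	printf("req=%#llx epp=%#llx\n", (unsigned long long)new_req,
	       (unsigned long long)FIELD_GET(AMD_CPPC_EPP_PERF_MASK, new_req));
	return 0;
}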
@@ -182,7 +188,6 @@ static DEFINE_MUTEX(amd_pstate_driver_lock);
 
 static s16 msr_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
 {
-	u64 epp;
 	int ret;
 
 	if (!cppc_req_cached) {
@@ -192,9 +197,8 @@ static s16 msr_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
 			return ret;
 		}
 	}
-	epp = (cppc_req_cached >> 24) & 0xFF;
 
-	return (s16)epp;
+	return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cppc_req_cached);
 }
 
 DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
@@ -269,12 +273,11 @@ static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
 
 static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
 {
-	int ret;
-
 	u64 value = READ_ONCE(cpudata->cppc_req_cached);
+	int ret;
 
-	value &= ~GENMASK_ULL(31, 24);
-	value |= (u64)epp << 24;
+	value &= ~AMD_CPPC_EPP_PERF_MASK;
+	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
 
 	ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
@@ -327,8 +330,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
 	if (trace_amd_pstate_epp_perf_enabled()) {
 		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
 					  epp,
-					  AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
-					  AMD_CPPC_MAX_PERF(cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
 					  cpudata->boost_state);
 	}
 
@@ -542,18 +545,15 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 		des_perf = 0;
 	}
 
-	value &= ~AMD_CPPC_MIN_PERF(~0L);
-	value |= AMD_CPPC_MIN_PERF(min_perf);
-
-	value &= ~AMD_CPPC_DES_PERF(~0L);
-	value |= AMD_CPPC_DES_PERF(des_perf);
-
 	/* limit the max perf when core performance boost feature is disabled */
 	if (!cpudata->boost_supported)
 		max_perf = min_t(unsigned long, nominal_perf, max_perf);
 
-	value &= ~AMD_CPPC_MAX_PERF(~0L);
-	value |= AMD_CPPC_MAX_PERF(max_perf);
+	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+		   AMD_CPPC_DES_PERF_MASK);
+	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
 
 	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
 		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
@@ -1573,16 +1573,11 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
 		min_perf = min(cpudata->nominal_perf, max_perf);
 
-	/* Initial min/max values for CPPC Performance Controls Register */
-	value &= ~AMD_CPPC_MIN_PERF(~0L);
-	value |= AMD_CPPC_MIN_PERF(min_perf);
-
-	value &= ~AMD_CPPC_MAX_PERF(~0L);
-	value |= AMD_CPPC_MAX_PERF(max_perf);
-
-	/* CPPC EPP feature require to set zero to the desire perf bit */
-	value &= ~AMD_CPPC_DES_PERF(~0L);
-	value |= AMD_CPPC_DES_PERF(0);
+	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+		   AMD_CPPC_DES_PERF_MASK);
+	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, 0);
+	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
 
 	/* Get BIOS pre-defined epp value */
 	epp = amd_pstate_get_epp(cpudata, value);
@@ -1652,7 +1647,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 	if (trace_amd_pstate_epp_perf_enabled()) {
 		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
 					  cpudata->epp_cached,
-					  AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
 					  max_perf, cpudata->boost_state);
 	}
 
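
The two larger hunks, in amd_pstate_update() and amd_pstate_epp_update_limit(), fold three separate clear/set pairs into one combined clear followed by one FIELD_PREP per field; because the three masks do not overlap, the order of the ORs does not matter. Below is another standalone sketch (again an editor's illustration, not kernel code) checking the consolidated form against the old per-field sequence; the old-style AMD_CPPC_*_PERF helpers here are simplified local stand-ins modeled on the msr-index.h macros being replaced.

/* Userspace sketch (not kernel code); same simplified stand-ins as above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		((~0ULL >> (63 - (h))) & (~0ULL << (l)))
#define FIELD_PREP(mask, val)	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define AMD_CPPC_MAX_PERF_MASK	GENMASK(7, 0)
#define AMD_CPPC_MIN_PERF_MASK	GENMASK(15, 8)
#define AMD_CPPC_DES_PERF_MASK	GENMASK(23, 16)

/* Old-style helpers: mask the value to 8 bits and shift it into its field. */
#define AMD_CPPC_MAX_PERF(v)	(((uint64_t)(v) & 0xFF) << 0)
#define AMD_CPPC_MIN_PERF(v)	(((uint64_t)(v) & 0xFF) << 8)
#define AMD_CPPC_DES_PERF(v)	(((uint64_t)(v) & 0xFF) << 16)

int main(void)
{
	uint64_t cached = 0xdeadbeef12345678ULL;	/* pretend cppc_req_cached */
	uint32_t max_perf = 0xE4, min_perf = 0x20, des_perf = 0;

	/* Old sequence: clear and set each field separately. */
	uint64_t old_v = cached;
	old_v &= ~AMD_CPPC_MIN_PERF(~0L);
	old_v |= AMD_CPPC_MIN_PERF(min_perf);
	old_v &= ~AMD_CPPC_DES_PERF(~0L);
	old_v |= AMD_CPPC_DES_PERF(des_perf);
	old_v &= ~AMD_CPPC_MAX_PERF(~0L);
	old_v |= AMD_CPPC_MAX_PERF(max_perf);

	/* New sequence: one combined clear, then FIELD_PREP per field. */
	uint64_t new_v = cached;
	new_v &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
		   AMD_CPPC_DES_PERF_MASK);
	new_v |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
	new_v |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
	new_v |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);

	assert(old_v == new_v);
	printf("cppc_req = %#llx\n", (unsigned long long)new_v);
	return 0;
}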