Skip to content

Commit b3781f3

Browse files
committed
cpufreq/amd-pstate: Cache EPP value and use that everywhere
Cache the value in cpudata->epp_cached, and use that for all callers. As all callers use the cached value, merge amd_pstate_get_energy_pref_index() into show_energy_performance_preference(). Check whether the EPP value has changed before writing it to the MSR or shared memory region. Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com> Link: https://lore.kernel.org/r/20241209185248.16301-12-mario.limonciello@amd.com Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
1 parent 3f7b835 commit b3781f3

File tree

1 file changed

+43
-62
lines changed

1 file changed

+43
-62
lines changed

drivers/cpufreq/amd-pstate.c

Lines changed: 43 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -186,29 +186,28 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
186186
static DEFINE_MUTEX(amd_pstate_limits_lock);
187187
static DEFINE_MUTEX(amd_pstate_driver_lock);
188188

189-
static s16 msr_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
189+
static s16 msr_get_epp(struct amd_cpudata *cpudata)
190190
{
191+
u64 value;
191192
int ret;
192193

193-
if (!cppc_req_cached) {
194-
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req_cached);
195-
if (ret < 0) {
196-
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
197-
return ret;
198-
}
194+
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
195+
if (ret < 0) {
196+
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
197+
return ret;
199198
}
200199

201-
return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cppc_req_cached);
200+
return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
202201
}
203202

204203
DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
205204

206-
static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
205+
static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
207206
{
208-
return static_call(amd_pstate_get_epp)(cpudata, cppc_req_cached);
207+
return static_call(amd_pstate_get_epp)(cpudata);
209208
}
210209

211-
static s16 shmem_get_epp(struct amd_cpudata *cpudata, u64 dummy)
210+
static s16 shmem_get_epp(struct amd_cpudata *cpudata)
212211
{
213212
u64 epp;
214213
int ret;
@@ -222,35 +221,6 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata, u64 dummy)
222221
return (s16)(epp & 0xff);
223222
}
224223

225-
static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
226-
{
227-
s16 epp;
228-
int index = -EINVAL;
229-
230-
epp = amd_pstate_get_epp(cpudata, 0);
231-
if (epp < 0)
232-
return epp;
233-
234-
switch (epp) {
235-
case AMD_CPPC_EPP_PERFORMANCE:
236-
index = EPP_INDEX_PERFORMANCE;
237-
break;
238-
case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
239-
index = EPP_INDEX_BALANCE_PERFORMANCE;
240-
break;
241-
case AMD_CPPC_EPP_BALANCE_POWERSAVE:
242-
index = EPP_INDEX_BALANCE_POWERSAVE;
243-
break;
244-
case AMD_CPPC_EPP_POWERSAVE:
245-
index = EPP_INDEX_POWERSAVE;
246-
break;
247-
default:
248-
break;
249-
}
250-
251-
return index;
252-
}
253-
254224
static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
255225
u32 des_perf, u32 max_perf, bool fast_switch)
256226
{
@@ -275,19 +245,23 @@ static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
275245

276246
static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
277247
{
278-
u64 value = READ_ONCE(cpudata->cppc_req_cached);
248+
u64 value, prev;
279249
int ret;
280250

251+
value = prev = READ_ONCE(cpudata->cppc_req_cached);
281252
value &= ~AMD_CPPC_EPP_PERF_MASK;
282253
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
283254

255+
if (value == prev)
256+
return 0;
257+
284258
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
285259
if (ret) {
286260
pr_err("failed to set energy perf value (%d)\n", ret);
287261
return ret;
288262
}
289263

290-
cpudata->epp_cached = epp;
264+
WRITE_ONCE(cpudata->epp_cached, epp);
291265
WRITE_ONCE(cpudata->cppc_req_cached, value);
292266

293267
return ret;
@@ -305,13 +279,16 @@ static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
305279
int ret;
306280
struct cppc_perf_ctrls perf_ctrls;
307281

282+
if (epp == cpudata->epp_cached)
283+
return 0;
284+
308285
perf_ctrls.energy_perf = epp;
309286
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
310287
if (ret) {
311288
pr_debug("failed to set energy perf value (%d)\n", ret);
312289
return ret;
313290
}
314-
cpudata->epp_cached = epp;
291+
WRITE_ONCE(cpudata->epp_cached, epp);
315292

316293
return ret;
317294
}
@@ -1214,9 +1191,22 @@ static ssize_t show_energy_performance_preference(
12141191
struct amd_cpudata *cpudata = policy->driver_data;
12151192
int preference;
12161193

1217-
preference = amd_pstate_get_energy_pref_index(cpudata);
1218-
if (preference < 0)
1219-
return preference;
1194+
switch (cpudata->epp_cached) {
1195+
case AMD_CPPC_EPP_PERFORMANCE:
1196+
preference = EPP_INDEX_PERFORMANCE;
1197+
break;
1198+
case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
1199+
preference = EPP_INDEX_BALANCE_PERFORMANCE;
1200+
break;
1201+
case AMD_CPPC_EPP_BALANCE_POWERSAVE:
1202+
preference = EPP_INDEX_BALANCE_POWERSAVE;
1203+
break;
1204+
case AMD_CPPC_EPP_POWERSAVE:
1205+
preference = EPP_INDEX_POWERSAVE;
1206+
break;
1207+
default:
1208+
return -EINVAL;
1209+
}
12201210

12211211
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
12221212
}
@@ -1501,7 +1491,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
15011491

15021492
policy->driver_data = cpudata;
15031493

1504-
cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
1494+
cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata);
15051495

15061496
policy->min = policy->cpuinfo.min_freq;
15071497
policy->max = policy->cpuinfo.max_freq;
@@ -1555,35 +1545,26 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
15551545
{
15561546
struct amd_cpudata *cpudata = policy->driver_data;
15571547
u64 value;
1558-
s16 epp;
15591548

15601549
amd_pstate_update_min_max_limit(policy);
15611550

15621551
value = READ_ONCE(cpudata->cppc_req_cached);
15631552

15641553
value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
1565-
AMD_CPPC_DES_PERF_MASK);
1554+
AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
15661555
value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, cpudata->max_limit_perf);
15671556
value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, 0);
15681557
value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, cpudata->min_limit_perf);
15691558

1570-
/* Get BIOS pre-defined epp value */
1571-
epp = amd_pstate_get_epp(cpudata, value);
1572-
if (epp < 0) {
1573-
/**
1574-
* This return value can only be negative for shared_memory
1575-
* systems where EPP register read/write not supported.
1576-
*/
1577-
return epp;
1578-
}
1579-
15801559
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1581-
epp = 0;
1560+
WRITE_ONCE(cpudata->epp_cached, 0);
1561+
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, cpudata->epp_cached);
15821562

15831563
WRITE_ONCE(cpudata->cppc_req_cached, value);
15841564

15851565
if (trace_amd_pstate_epp_perf_enabled()) {
1586-
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
1566+
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
1567+
cpudata->epp_cached,
15871568
cpudata->min_limit_perf,
15881569
cpudata->max_limit_perf,
15891570
policy->boost_enabled);
@@ -1592,7 +1573,7 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
15921573
amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
15931574
cpudata->max_limit_perf, false);
15941575

1595-
return amd_pstate_set_epp(cpudata, epp);
1576+
return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
15961577
}
15971578

15981579
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)

0 commit comments

Comments
 (0)