
Commit 0b055cf

Merge branches 'pm-cpuidle', 'pm-cpufreq' and 'pm-devfreq'
Merge cpuidle, cpufreq and devfreq updates for 6.8-rc1:

 - Add support for the Sierra Forest, Grand Ridge and Meteorlake SoCs to
   the intel_idle cpuidle driver (Artem Bityutskiy, Zhang Rui).

 - Do not enable interrupts when entering idle in the haltpoll cpuidle
   driver (Borislav Petkov).

 - Add Emerald Rapids support in no-HWP mode to the intel_pstate cpufreq
   driver (Zhenguo Yao).

 - Use EPP values programmed by the platform firmware as balance
   performance ones by default in intel_pstate (Srinivas Pandruvada).

 - Add a missing function return value check to the SCMI cpufreq driver
   to avoid unexpected behavior (Alexandra Diupina).

 - Fix parameter type warning in the armada-8k cpufreq driver
   (Gregory CLEMENT).

 - Rework trans_stat_show() in the devfreq core code to avoid buffer
   overflows (Christian Marangi).

 - Synchronize devfreq_monitor_[start/stop] so as to prevent a timer
   list corruption from occurring when devfreq governors are switched
   frequently (Mukesh Ojha).

* pm-cpuidle:
  cpuidle: haltpoll: Do not enable interrupts when entering idle
  intel_idle: add Sierra Forest SoC support
  intel_idle: add Grand Ridge SoC support
  intel_idle: Add Meteorlake support

* pm-cpufreq:
  cpufreq: intel_pstate: Add Emerald Rapids support in no-HWP mode
  cpufreq: armada-8k: Fix parameter type warning
  cpufreq: scmi: process the result of devm_of_clk_add_hw_provider()
  cpufreq: intel_pstate: Prioritize firmware-provided balance performance EPP

* pm-devfreq:
  PM / devfreq: Synchronize devfreq_monitor_[start/stop]
  PM / devfreq: Convert to use sysfs_emit_at() API
  PM / devfreq: Fix buffer overflow in trans_stat_show
4 parents: 4ee4ffc + c8f5cae + e956c88 + bfd7b2d

File tree

7 files changed (+195, -37 lines)

Documentation/ABI/testing/sysfs-class-devfreq

Lines changed: 3 additions & 0 deletions
@@ -52,6 +52,9 @@ Description:
 
 		echo 0 > /sys/class/devfreq/.../trans_stat
 
+		If the transition table is bigger than PAGE_SIZE, reading
+		this will return an -EFBIG error.
+
 What:		/sys/class/devfreq/.../available_frequencies
 Date:		October 2012
 Contact:	Nishanth Menon <nm@ti.com>
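For illustration only (this is not part of the patch, and the devfreq0 node name is assumed), a minimal userspace sketch of the documented behavior: reading trans_stat either returns the table or fails with EFBIG once the table no longer fits in a page, and writing 0 to the same file resets the counters.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/devfreq/devfreq0/trans_stat", O_RDONLY);

	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0 && errno == EFBIG) {
		/* Table grew beyond PAGE_SIZE, as documented above. */
		fprintf(stderr, "trans_stat: transition table too large\n");
	} else if (n >= 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* "echo 0 > trans_stat" resets these counters */
	}

	close(fd);
	return 0;
}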

drivers/cpufreq/armada-8k-cpufreq.c

Lines changed: 2 additions & 2 deletions
@@ -57,7 +57,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
 			continue;
 		}
 
-		clk = clk_get(cpu_dev, 0);
+		clk = clk_get(cpu_dev, NULL);
 		if (IS_ERR(clk)) {
 			pr_warn("Cannot get clock for CPU %d\n", cpu);
 		} else {
@@ -165,7 +165,7 @@ static int __init armada_8k_cpufreq_init(void)
 			continue;
 		}
 
-		clk = clk_get(cpu_dev, 0);
+		clk = clk_get(cpu_dev, NULL);
 
 		if (IS_ERR(clk)) {
 			pr_err("Cannot get clock for CPU %d\n", cpu);
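The armada-8k fix is purely about the second argument of clk_get(): the parameter is a const char *con_id, so a literal 0 compiles but triggers "plain integer used as NULL pointer" warnings. A minimal sketch of the corrected call pattern, with a hypothetical consumer function:

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical consumer: a device with a single clock may pass a NULL
 * connection ID; NULL (rather than 0) matches the const char *con_id
 * parameter type and keeps sparse quiet. */
static int example_get_cpu_clk(struct device *cpu_dev)
{
	struct clk *clk = clk_get(cpu_dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_put(clk);
	return 0;
}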

drivers/cpufreq/intel_pstate.c

Lines changed: 8 additions & 7 deletions
@@ -1691,13 +1691,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
 {
 	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 
-	/*
-	 * If this CPU gen doesn't call for change in balance_perf
-	 * EPP return.
-	 */
-	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
-		return;
-
 	/*
 	 * If the EPP is set by firmware, which means that firmware enabled HWP
 	 * - Is equal or less than 0x80 (default balance_perf EPP)
@@ -1710,6 +1703,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
 		return;
 	}
 
+	/*
+	 * If this CPU gen doesn't call for change in balance_perf
+	 * EPP return.
+	 */
+	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+		return;
+
 	/*
 	 * Use hard coded value per gen to update the balance_perf
 	 * and default EPP.
@@ -2406,6 +2406,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	X86_MATCH(ICELAKE_X, core_funcs),
 	X86_MATCH(TIGERLAKE, core_funcs),
 	X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+	X86_MATCH(EMERALDRAPIDS_X, core_funcs),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
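A condensed sketch of the resulting order of checks in intel_pstate_update_epp_defaults() after the reordering above; firmware_epp_usable() is a hypothetical stand-in for the firmware-EPP condition elided from this hunk, not the upstream code:

static void update_epp_defaults_sketch(struct cpudata *cpudata)
{
	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);

	/* 1. An EPP programmed by the platform firmware now wins. */
	if (firmware_epp_usable(cpudata))	/* hypothetical predicate */
		return;

	/* 2. Only afterwards bail out on generations without a balance_perf override. */
	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
		return;

	/* 3. Otherwise fall through to the hard-coded per-generation EPP values. */
}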

drivers/cpufreq/scmi-cpufreq.c

Lines changed: 5 additions & 2 deletions
@@ -334,8 +334,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
 
 #ifdef CONFIG_COMMON_CLK
 	/* dummy clock provider as needed by OPP if clocks property is used */
-	if (of_property_present(dev->of_node, "#clock-cells"))
-		devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+	if (of_property_present(dev->of_node, "#clock-cells")) {
+		ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+		if (ret)
+			return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
+	}
 #endif
 
 	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
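The new error path relies on dev_err_probe(), which logs the message (or silently records it for -EPROBE_DEFER) and returns the error code, so one statement replaces a dev_err() plus a separate return. A minimal sketch of that pattern in a hypothetical probe function:

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Register a dummy clock provider and fail the probe if that fails. */
	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
	if (ret)
		return dev_err_probe(dev, ret,
				     "registering clock provider failed\n");

	return 0;
}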

drivers/cpuidle/cpuidle-haltpoll.c

Lines changed: 4 additions & 5 deletions
@@ -25,13 +25,12 @@ MODULE_PARM_DESC(force, "Load unconditionally");
 static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
 static enum cpuhp_state haltpoll_hp_state;
 
-static int default_enter_idle(struct cpuidle_device *dev,
-			      struct cpuidle_driver *drv, int index)
+static __cpuidle int default_enter_idle(struct cpuidle_device *dev,
+					struct cpuidle_driver *drv, int index)
 {
-	if (current_clr_polling_and_test()) {
-		local_irq_enable();
+	if (current_clr_polling_and_test())
 		return index;
-	}
+
 	arch_cpu_idle();
 	return index;
 }

drivers/devfreq/devfreq.c

Lines changed: 59 additions & 21 deletions
@@ -461,10 +461,14 @@ static void devfreq_monitor(struct work_struct *work)
 	if (err)
 		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
 
+	if (devfreq->stop_polling)
+		goto out;
+
 	queue_delayed_work(devfreq_wq, &devfreq->work,
 				msecs_to_jiffies(devfreq->profile->polling_ms));
-	mutex_unlock(&devfreq->lock);
 
+out:
+	mutex_unlock(&devfreq->lock);
 	trace_devfreq_monitor(devfreq);
 }
 
@@ -483,6 +487,10 @@ void devfreq_monitor_start(struct devfreq *devfreq)
 	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
 		return;
 
+	mutex_lock(&devfreq->lock);
+	if (delayed_work_pending(&devfreq->work))
+		goto out;
+
 	switch (devfreq->profile->timer) {
 	case DEVFREQ_TIMER_DEFERRABLE:
 		INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
@@ -491,12 +499,16 @@ void devfreq_monitor_start(struct devfreq *devfreq)
 		INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
 		break;
 	default:
-		return;
+		goto out;
 	}
 
 	if (devfreq->profile->polling_ms)
 		queue_delayed_work(devfreq_wq, &devfreq->work,
 			msecs_to_jiffies(devfreq->profile->polling_ms));
+
+out:
+	devfreq->stop_polling = false;
+	mutex_unlock(&devfreq->lock);
 }
 EXPORT_SYMBOL(devfreq_monitor_start);
 
@@ -513,6 +525,14 @@ void devfreq_monitor_stop(struct devfreq *devfreq)
 	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
 		return;
 
+	mutex_lock(&devfreq->lock);
+	if (devfreq->stop_polling) {
+		mutex_unlock(&devfreq->lock);
+		return;
+	}
+
+	devfreq->stop_polling = true;
+	mutex_unlock(&devfreq->lock);
 	cancel_delayed_work_sync(&devfreq->work);
 }
 EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -1688,7 +1708,7 @@ static ssize_t trans_stat_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
 	struct devfreq *df = to_devfreq(dev);
-	ssize_t len;
+	ssize_t len = 0;
 	int i, j;
 	unsigned int max_state;
 
@@ -1697,7 +1717,7 @@ static ssize_t trans_stat_show(struct device *dev,
 	max_state = df->max_state;
 
 	if (max_state == 0)
-		return sprintf(buf, "Not Supported.\n");
+		return sysfs_emit(buf, "Not Supported.\n");
 
 	mutex_lock(&df->lock);
 	if (!df->stop_polling &&
@@ -1707,31 +1727,49 @@ static ssize_t trans_stat_show(struct device *dev,
 	}
 	mutex_unlock(&df->lock);
 
-	len = sprintf(buf, " From : To\n");
-	len += sprintf(buf + len, " :");
-	for (i = 0; i < max_state; i++)
-		len += sprintf(buf + len, "%10lu",
-				df->freq_table[i]);
+	len += sysfs_emit_at(buf, len, " From : To\n");
+	len += sysfs_emit_at(buf, len, " :");
+	for (i = 0; i < max_state; i++) {
+		if (len >= PAGE_SIZE - 1)
+			break;
+		len += sysfs_emit_at(buf, len, "%10lu",
+				     df->freq_table[i]);
+	}
 
-	len += sprintf(buf + len, " time(ms)\n");
+	if (len >= PAGE_SIZE - 1)
+		return PAGE_SIZE - 1;
+	len += sysfs_emit_at(buf, len, " time(ms)\n");
 
 	for (i = 0; i < max_state; i++) {
-		if (df->freq_table[i] == df->previous_freq)
-			len += sprintf(buf + len, "*");
+		if (len >= PAGE_SIZE - 1)
+			break;
+		if (df->freq_table[i] == df->previous_freq)
+			len += sysfs_emit_at(buf, len, "*");
 		else
-			len += sprintf(buf + len, " ");
-
-		len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
-		for (j = 0; j < max_state; j++)
-			len += sprintf(buf + len, "%10u",
+			len += sysfs_emit_at(buf, len, " ");
+		if (len >= PAGE_SIZE - 1)
+			break;
+		len += sysfs_emit_at(buf, len, "%10lu:", df->freq_table[i]);
+		for (j = 0; j < max_state; j++) {
+			if (len >= PAGE_SIZE - 1)
+				break;
+			len += sysfs_emit_at(buf, len, "%10u",
 				df->stats.trans_table[(i * max_state) + j]);
+		}
+		if (len >= PAGE_SIZE - 1)
+			break;
+		len += sysfs_emit_at(buf, len, "%10llu\n", (u64)
+			jiffies64_to_msecs(df->stats.time_in_state[i]));
+	}
 
-		len += sprintf(buf + len, "%10llu\n", (u64)
-			jiffies64_to_msecs(df->stats.time_in_state[i]));
+	if (len < PAGE_SIZE - 1)
+		len += sysfs_emit_at(buf, len, "Total transition : %u\n",
+				     df->stats.total_trans);
+	if (len >= PAGE_SIZE - 1) {
+		pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
+		return -EFBIG;
 	}
 
-	len += sprintf(buf + len, "Total transition : %u\n",
-		       df->stats.total_trans);
 	return len;
 }

drivers/idle/intel_idle.c

Lines changed: 114 additions & 0 deletions
@@ -923,6 +923,35 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
 		.enter = NULL }
 };
 
+static struct cpuidle_state mtl_l_cstates[] __initdata = {
+	{
+		.name = "C1E",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C6",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 420,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C10",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 310,
+		.target_residency = 930,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.enter = NULL }
+};
+
 static struct cpuidle_state gmt_cstates[] __initdata = {
 	{
 		.name = "C1",
@@ -1242,6 +1271,72 @@ static struct cpuidle_state snr_cstates[] __initdata = {
 		.enter = NULL }
 };
 
+static struct cpuidle_state grr_cstates[] __initdata = {
+	{
+		.name = "C1",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C1E",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+		.exit_latency = 2,
+		.target_residency = 10,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C6S",
+		.desc = "MWAIT 0x22",
+		.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 500,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.enter = NULL }
+};
+
+static struct cpuidle_state srf_cstates[] __initdata = {
+	{
+		.name = "C1",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C1E",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+		.exit_latency = 2,
+		.target_residency = 10,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C6S",
+		.desc = "MWAIT 0x22",
+		.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 270,
+		.target_residency = 700,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C6SP",
+		.desc = "MWAIT 0x23",
+		.flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 310,
+		.target_residency = 900,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.enter = NULL }
+};
+
 static const struct idle_cpu idle_cpu_nehalem __initconst = {
 	.state_table = nehalem_cstates,
 	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1349,6 +1444,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = {
 	.state_table = adl_l_cstates,
 };
 
+static const struct idle_cpu idle_cpu_mtl_l __initconst = {
+	.state_table = mtl_l_cstates,
+};
+
 static const struct idle_cpu idle_cpu_gmt __initconst = {
 	.state_table = gmt_cstates,
 };
@@ -1387,6 +1486,18 @@ static const struct idle_cpu idle_cpu_snr __initconst = {
 	.use_acpi = true,
 };
 
+static const struct idle_cpu idle_cpu_grr __initconst = {
+	.state_table = grr_cstates,
+	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
+};
+
+static const struct idle_cpu idle_cpu_srf __initconst = {
+	.state_table = srf_cstates,
+	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
+};
+
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
 	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
@@ -1423,6 +1534,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
+	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &idle_cpu_mtl_l),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt),
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
 	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
@@ -1432,6 +1544,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf),
 	{}
 };
 