Commit 2475bf0

Merge tag 'sched_urgent_for_v6.2_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Borislav Petkov:

 - Make sure the scheduler doesn't use stale frequency scaling values
   when the latter get disabled due to a value error

 - Fix a NULL pointer access on UP configs

 - Use the proper locking when updating CPU capacity

* tag 'sched_urgent_for_v6.2_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/aperfmperf: Erase stale arch_freq_scale values when disabling frequency invariance readings
  sched/core: Fix NULL pointer access fault in sched_setaffinity() with non-SMP configs
  sched/fair: Fixes for capacity inversion detection
  sched/uclamp: Fix a uninitialized variable warnings

2 parents ab2f408 + 5f5cc9e, commit 2475bf0

File tree

3 files changed (+44, -23 lines)

arch/x86/kernel/cpu/aperfmperf.c

Lines changed: 9 additions & 0 deletions
@@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void)
 
 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
+	int cpu;
+
 	static_branch_disable(&arch_scale_freq_key);
+
+	/*
+	 * Set arch_freq_scale to a default value on all cpus
+	 * This negates the effect of scaling
+	 */
+	for_each_possible_cpu(cpu)
+		per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
 }
 
 static DECLARE_WORK(disable_freq_invariance_work,
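
The reset matters because arch_freq_scale is the per-CPU factor that frequency-invariant accounting multiplies raw utilization by, and SCHED_CAPACITY_SCALE (1024) is the neutral "no scaling" value. The standalone C sketch below models that relationship; it is an illustration of the idea, not kernel code, and the array, helper names, and sample numbers are hypothetical stand-ins for the per-CPU variables in aperfmperf.c.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024	/* the kernel's fixed-point "100%" unit */
#define NR_CPUS 4

/* Stand-in for the kernel's per-CPU arch_freq_scale variable. */
static unsigned long arch_freq_scale[NR_CPUS];

/* Frequency-invariant utilization: raw utilization scaled by the per-CPU factor. */
static unsigned long freq_inv_util(unsigned long raw_util, int cpu)
{
	return raw_util * arch_freq_scale[cpu] / SCHED_CAPACITY_SCALE;
}

/* Models disable_freq_invariance_workfn(): reset every CPU to the neutral factor. */
static void disable_freq_invariance(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		arch_freq_scale[cpu] = SCHED_CAPACITY_SCALE;
}

int main(void)
{
	/* Pretend CPU 0 was last observed running at half of its maximum frequency. */
	arch_freq_scale[0] = 512;
	printf("scaled util before reset: %lu\n", freq_inv_util(800, 0));	/* 400 */

	/* Without the reset, that stale 512 would keep shrinking utilization. */
	disable_freq_invariance();
	printf("scaled util after reset:  %lu\n", freq_inv_util(800, 0));	/* 800 */
	return 0;
}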

kernel/sched/core.c

Lines changed: 8 additions & 2 deletions
@@ -8290,12 +8290,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	if (retval)
 		goto out_put_task;
 
+	/*
+	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+	 * alloc_user_cpus_ptr() returns NULL.
+	 */
 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
-	if (IS_ENABLED(CONFIG_SMP) && !user_mask) {
+	if (user_mask) {
+		cpumask_copy(user_mask, in_mask);
+	} else if (IS_ENABLED(CONFIG_SMP)) {
 		retval = -ENOMEM;
 		goto out_put_task;
 	}
-	cpumask_copy(user_mask, in_mask);
+
 	ac = (struct affinity_context){
 		.new_mask  = in_mask,
 		.user_mask = user_mask,
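
The crux of this fix: on !CONFIG_SMP kernels, alloc_user_cpus_ptr() returns NULL by design, so the old code first treated that NULL as an allocation failure and, in configurations that got past the check, dereferenced it in cpumask_copy(). The sketch below models the corrected control flow in ordinary userspace C; the cpumask type and the smp flag are simplifications for illustration, not the kernel's actual definitions.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in: a "cpumask" is just a bitmap word here. */
typedef unsigned long cpumask_t;

/* Models alloc_user_cpus_ptr(): deliberately returns NULL when SMP is off. */
static cpumask_t *alloc_user_cpus_ptr(bool smp)
{
	return smp ? malloc(sizeof(cpumask_t)) : NULL;
}

/* Models the fixed flow in sched_setaffinity() around user_mask. */
static int set_affinity(cpumask_t in_mask, bool smp)
{
	cpumask_t *user_mask = alloc_user_cpus_ptr(smp);

	if (user_mask) {
		*user_mask = in_mask;		/* cpumask_copy() only on success */
	} else if (smp) {
		return -ENOMEM;			/* NULL is a real failure only on SMP */
	}
	/* On UP, NULL simply means the user mask isn't tracked: no error, no copy. */

	printf("affinity applied, user_mask %s\n", user_mask ? "kept" : "unused");
	free(user_mask);
	return 0;
}

int main(void)
{
	set_affinity(0x3, true);	/* "SMP": mask is copied and kept */
	set_affinity(0x3, false);	/* "UP": NULL is neither dereferenced nor an error */
	return 0;
}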

kernel/sched/fair.c

Lines changed: 27 additions & 21 deletions
@@ -7229,10 +7229,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 	eenv_task_busy_time(&eenv, p, prev_cpu);
 
 	for (; pd; pd = pd->next) {
+		unsigned long util_min = p_util_min, util_max = p_util_max;
 		unsigned long cpu_cap, cpu_thermal_cap, util;
 		unsigned long cur_delta, max_spare_cap = 0;
 		unsigned long rq_util_min, rq_util_max;
-		unsigned long util_min, util_max;
 		unsigned long prev_spare_cap = 0;
 		int max_spare_cap_cpu = -1;
 		unsigned long base_energy;
@@ -7251,6 +7251,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 		eenv.pd_cap = 0;
 
 		for_each_cpu(cpu, cpus) {
+			struct rq *rq = cpu_rq(cpu);
+
 			eenv.pd_cap += cpu_thermal_cap;
 
 			if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
@@ -7269,24 +7271,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 			 * much capacity we can get out of the CPU; this is
 			 * aligned with sched_cpu_util().
 			 */
-			if (uclamp_is_used()) {
-				if (uclamp_rq_is_idle(cpu_rq(cpu))) {
-					util_min = p_util_min;
-					util_max = p_util_max;
-				} else {
-					/*
-					 * Open code uclamp_rq_util_with() except for
-					 * the clamp() part. Ie: apply max aggregation
-					 * only. util_fits_cpu() logic requires to
-					 * operate on non clamped util but must use the
-					 * max-aggregated uclamp_{min, max}.
-					 */
-					rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
-					rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
-
-					util_min = max(rq_util_min, p_util_min);
-					util_max = max(rq_util_max, p_util_max);
-				}
+			if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
+				/*
+				 * Open code uclamp_rq_util_with() except for
+				 * the clamp() part. Ie: apply max aggregation
+				 * only. util_fits_cpu() logic requires to
+				 * operate on non clamped util but must use the
+				 * max-aggregated uclamp_{min, max}.
+				 */
+				rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
+				rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
+
+				util_min = max(rq_util_min, p_util_min);
+				util_max = max(rq_util_max, p_util_max);
 			}
 			if (!util_fits_cpu(util, util_min, util_max, cpu))
 				continue;
@@ -8871,16 +8868,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	 *   * Thermal pressure will impact all cpus in this perf domain
 	 *     equally.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_energy_enabled()) {
 		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+		struct perf_domain *pd;
 
+		rcu_read_lock();
+
+		pd = rcu_dereference(rq->rd->pd);
 		rq->cpu_capacity_inverted = 0;
 
 		for (; pd; pd = pd->next) {
			struct cpumask *pd_span = perf_domain_span(pd);
 			unsigned long pd_cap_orig, pd_cap;
 
+			/* We can't be inverted against our own pd */
+			if (cpumask_test_cpu(cpu_of(rq), pd_span))
+				continue;
+
 			cpu = cpumask_any(pd_span);
 			pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -8905,6 +8909,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 				break;
 			}
 		}
+
+		rcu_read_unlock();
 	}
 
 	trace_sched_cpu_capacity_tp(rq);
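
Two ideas in the fair.c changes are worth spelling out. First, util_min/util_max now start from the task's own clamps, so no branch can leave them uninitialized, and the runqueue clamps are max-aggregated in only when uclamp is in use and the runqueue is not idle. Second, the capacity-inversion hunks wrap the perf-domain walk in rcu_read_lock()/rcu_read_unlock() and skip the CPU's own domain. The standalone sketch below models only the clamp aggregation; the function and its parameters are illustrative stand-ins, not the kernel's API.

#include <stdbool.h>
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/*
 * Models the uclamp handling in find_energy_efficient_cpu() after the fix:
 * util_min/util_max start from the task's own clamps, and the runqueue
 * clamps are max-aggregated in only when uclamp is in use and the
 * runqueue is not idle.
 */
static void effective_clamps(bool uclamp_used, bool rq_idle,
			     unsigned long p_util_min, unsigned long p_util_max,
			     unsigned long rq_util_min, unsigned long rq_util_max,
			     unsigned long *util_min, unsigned long *util_max)
{
	/* Initialized up front, so no branch can leave them undefined. */
	*util_min = p_util_min;
	*util_max = p_util_max;

	if (uclamp_used && !rq_idle) {
		*util_min = max(rq_util_min, p_util_min);
		*util_max = max(rq_util_max, p_util_max);
	}
}

int main(void)
{
	unsigned long lo, hi;

	effective_clamps(true, false, 100, 700, 200, 900, &lo, &hi);
	printf("busy rq: min=%lu max=%lu\n", lo, hi);	/* 200 900 */

	effective_clamps(true, true, 100, 700, 200, 900, &lo, &hi);
	printf("idle rq: min=%lu max=%lu\n", lo, hi);	/* 100 700 */
	return 0;
}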
