Skip to content

Commit 7136849

Browse files
committed
Merge tag 'sched_urgent_for_v5.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Borislav Petkov:

 - Use the correct static key checking primitive on the IRQ exit path

 - Two fixes for the new forceidle balancer

* tag 'sched_urgent_for_v5.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  entry: Fix compile error in dynamic_irqentry_exit_cond_resched()
  sched: Teach the forced-newidle balancer about CPU affinity limitation.
  sched/core: Fix forceidle balancing
2 parents 1862a69 + 0a70045 commit 7136849

File tree

4 files changed

+12
-13
lines changed

4 files changed

+12
-13
lines changed

kernel/entry/common.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -392,7 +392,7 @@ DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
392392
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
393393
void dynamic_irqentry_exit_cond_resched(void)
394394
{
395-
if (!static_key_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
395+
if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
396396
return;
397397
raw_irqentry_exit_cond_resched();
398398
}

kernel/sched/core.c

Lines changed: 11 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -5752,6 +5752,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
57525752

57535753
extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
57545754

5755+
static void queue_core_balance(struct rq *rq);
5756+
57555757
static struct task_struct *
57565758
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
57575759
{
@@ -5801,7 +5803,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
58015803
}
58025804

58035805
rq->core_pick = NULL;
5804-
return next;
5806+
goto out;
58055807
}
58065808

58075809
put_prev_task_balance(rq, prev, rf);
@@ -5851,7 +5853,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
58515853
*/
58525854
WARN_ON_ONCE(fi_before);
58535855
task_vruntime_update(rq, next, false);
5854-
goto done;
5856+
goto out_set_next;
58555857
}
58565858
}
58575859

@@ -5970,8 +5972,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
59705972
resched_curr(rq_i);
59715973
}
59725974

5973-
done:
5975+
out_set_next:
59745976
set_next_task(rq, next);
5977+
out:
5978+
if (rq->core->core_forceidle_count && next == rq->idle)
5979+
queue_core_balance(rq);
5980+
59755981
return next;
59765982
}
59775983

@@ -6000,7 +6006,7 @@ static bool try_steal_cookie(int this, int that)
60006006
if (p == src->core_pick || p == src->curr)
60016007
goto next;
60026008

6003-
if (!cpumask_test_cpu(this, &p->cpus_mask))
6009+
if (!is_cpu_allowed(p, this))
60046010
goto next;
60056011

60066012
if (p->core_occupation > dst->idle->core_occupation)
@@ -6066,7 +6072,7 @@ static void sched_core_balance(struct rq *rq)
60666072

60676073
static DEFINE_PER_CPU(struct callback_head, core_balance_head);
60686074

6069-
void queue_core_balance(struct rq *rq)
6075+
static void queue_core_balance(struct rq *rq)
60706076
{
60716077
if (!sched_core_enabled(rq))
60726078
return;

kernel/sched/idle.c

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -434,7 +434,6 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
434434
{
435435
update_idle_core(rq);
436436
schedstat_inc(rq->sched_goidle);
437-
queue_core_balance(rq);
438437
}
439438

440439
#ifdef CONFIG_SMP

kernel/sched/sched.h

Lines changed: 0 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -1232,8 +1232,6 @@ static inline bool sched_group_cookie_match(struct rq *rq,
12321232
return false;
12331233
}
12341234

1235-
extern void queue_core_balance(struct rq *rq);
1236-
12371235
static inline bool sched_core_enqueued(struct task_struct *p)
12381236
{
12391237
return !RB_EMPTY_NODE(&p->core_node);
@@ -1267,10 +1265,6 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
12671265
return &rq->__lock;
12681266
}
12691267

1270-
static inline void queue_core_balance(struct rq *rq)
1271-
{
1272-
}
1273-
12741268
static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
12751269
{
12761270
return true;

0 commit comments

Comments (0)