Skip to content

Commit 2f02735

Browse files
Yang Yingliang authored and Peter Zijlstra committed
sched/core: Introduce sched_set_rq_on/offline() helper
Introduce the sched_set_rq_online() and sched_set_rq_offline() helpers, so they can be called simply from both the normal and the error paths. No functional change. Cc: stable@kernel.org Signed-off-by: Yang Yingliang <yangyingliang@huawei.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20240703031610.587047-4-yangyingliang@huaweicloud.com
1 parent e22f910 commit 2f02735

File tree

1 file changed

+26
-14
lines changed

1 file changed

+26
-14
lines changed

kernel/sched/core.c

Lines changed: 26 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -7845,6 +7845,30 @@ void set_rq_offline(struct rq *rq)
78457845
}
78467846
}
78477847

7848+
static inline void sched_set_rq_online(struct rq *rq, int cpu)
7849+
{
7850+
struct rq_flags rf;
7851+
7852+
rq_lock_irqsave(rq, &rf);
7853+
if (rq->rd) {
7854+
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7855+
set_rq_online(rq);
7856+
}
7857+
rq_unlock_irqrestore(rq, &rf);
7858+
}
7859+
7860+
static inline void sched_set_rq_offline(struct rq *rq, int cpu)
7861+
{
7862+
struct rq_flags rf;
7863+
7864+
rq_lock_irqsave(rq, &rf);
7865+
if (rq->rd) {
7866+
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7867+
set_rq_offline(rq);
7868+
}
7869+
rq_unlock_irqrestore(rq, &rf);
7870+
}
7871+
78487872
/*
78497873
* used to mark begin/end of suspend/resume:
78507874
*/
@@ -7914,7 +7938,6 @@ static inline void sched_smt_present_dec(int cpu)
79147938
int sched_cpu_activate(unsigned int cpu)
79157939
{
79167940
struct rq *rq = cpu_rq(cpu);
7917-
struct rq_flags rf;
79187941

79197942
/*
79207943
* Clear the balance_push callback and prepare to schedule
@@ -7943,20 +7966,14 @@ int sched_cpu_activate(unsigned int cpu)
79437966
* 2) At runtime, if cpuset_cpu_active() fails to rebuild the
79447967
* domains.
79457968
*/
7946-
rq_lock_irqsave(rq, &rf);
7947-
if (rq->rd) {
7948-
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7949-
set_rq_online(rq);
7950-
}
7951-
rq_unlock_irqrestore(rq, &rf);
7969+
sched_set_rq_online(rq, cpu);
79527970

79537971
return 0;
79547972
}
79557973

79567974
int sched_cpu_deactivate(unsigned int cpu)
79577975
{
79587976
struct rq *rq = cpu_rq(cpu);
7959-
struct rq_flags rf;
79607977
int ret;
79617978

79627979
/*
@@ -7987,12 +8004,7 @@ int sched_cpu_deactivate(unsigned int cpu)
79878004
*/
79888005
synchronize_rcu();
79898006

7990-
rq_lock_irqsave(rq, &rf);
7991-
if (rq->rd) {
7992-
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7993-
set_rq_offline(rq);
7994-
}
7995-
rq_unlock_irqrestore(rq, &rf);
8007+
sched_set_rq_offline(rq, cpu);
79968008

79978009
/*
79988010
* When going down, decrement the number of cores with SMT present.

0 commit comments

Comments
 (0)