Commit e4855fc

arighi authored and htejun committed
sched_ext: idle: Refactor scx_select_cpu_dfl()

Make scx_select_cpu_dfl() more consistent with the other idle-related APIs
by returning a negative value when an idle CPU isn't found.

No functional changes, this is purely a refactoring.

Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
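In short, the change replaces a bool out-parameter with the usual negative-on-failure return convention. A minimal before/after sketch of the calling pattern, assembled from the hunks below (handle_idle_cpu() is a hypothetical stand-in for the caller's success path):

	/* Before: success reported through an out-parameter. */
	bool found;
	s32 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0, &found);
	if (found)
		handle_idle_cpu(cpu);	/* hypothetical success path */

	/*
	 * After: a negative return value means no idle CPU was found,
	 * consistent with scx_pick_idle_cpu() and the other idle APIs.
	 */
	s32 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
	if (cpu >= 0)
		handle_idle_cpu(cpu);	/* hypothetical success path */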
1 parent c414c21

3 files changed: +34 -27 lines changed

kernel/sched/ext.c

Lines changed: 5 additions & 4 deletions
@@ -3393,16 +3393,17 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag
 		else
 			return prev_cpu;
 	} else {
-		bool found;
 		s32 cpu;
 
-		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0, &found);
-		p->scx.selected_cpu = cpu;
-		if (found) {
+		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+		if (cpu >= 0) {
 			p->scx.slice = SCX_SLICE_DFL;
 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
 			__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
+		} else {
+			cpu = prev_cpu;
 		}
+		p->scx.selected_cpu = cpu;
 
 		if (rq_bypass)
 			__scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
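Read as straight code rather than a diff, the updated branch looks roughly like this (a sketch assembled from the hunk above, surrounding context elided). One subtlety: p->scx.selected_cpu is now assigned once, after the idle/fallback decision, so it always records the CPU the function actually settles on:

	} else {
		s32 cpu;

		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
		if (cpu >= 0) {
			/* Idle CPU found: dispatch with the default slice. */
			p->scx.slice = SCX_SLICE_DFL;
			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
			__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
		} else {
			/* No idle CPU: keep the task on its previous CPU. */
			cpu = prev_cpu;
		}
		p->scx.selected_cpu = cpu;
	}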

kernel/sched/ext_idle.c

Lines changed: 28 additions & 22 deletions
@@ -411,22 +411,26 @@ void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
  *
  * 5. Pick any idle CPU usable by the task.
  *
- * Step 3 and 4 are performed only if the system has, respectively, multiple
- * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
- * scx_selcpu_topo_numa).
+ * Step 3 and 4 are performed only if the system has, respectively,
+ * multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
+ * scx_selcpu_topo_numa) and they don't contain the same subset of CPUs.
+ *
+ * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
+ * begin in @prev_cpu's node and proceed to other nodes in order of
+ * increasing distance.
+ *
+ * Return the picked CPU if idle, or a negative value otherwise.
  *
  * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
  * we never call ops.select_cpu() for them, see select_task_rq().
  */
-s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags, bool *found)
+s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags)
 {
 	const struct cpumask *llc_cpus = NULL;
 	const struct cpumask *numa_cpus = NULL;
 	int node = scx_cpu_node_if_enabled(prev_cpu);
 	s32 cpu;
 
-	*found = false;
-
 	/*
 	 * This is necessary to protect llc_cpus.
 	 */
@@ -465,7 +469,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		if (cpus_share_cache(cpu, prev_cpu) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
 			cpu = prev_cpu;
-			goto cpu_found;
+			goto out_unlock;
 		}
 
 		/*
@@ -487,7 +491,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
 		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
 			if (cpumask_test_cpu(cpu, p->cpus_ptr))
-				goto cpu_found;
+				goto out_unlock;
 		}
 	}
 
@@ -502,7 +506,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		if (cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
 			cpu = prev_cpu;
-			goto cpu_found;
+			goto out_unlock;
 		}
 
 		/*
@@ -511,7 +515,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		if (llc_cpus) {
 			cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
 			if (cpu >= 0)
-				goto cpu_found;
+				goto out_unlock;
 		}
 
 		/*
@@ -520,7 +524,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		if (numa_cpus) {
 			cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
 			if (cpu >= 0)
-				goto cpu_found;
+				goto out_unlock;
 		}
 
 		/*
@@ -533,7 +537,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		 */
 		cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE);
 		if (cpu >= 0)
-			goto cpu_found;
+			goto out_unlock;
 
 		/*
 		 * Give up if we're strictly looking for a full-idle SMT
@@ -550,7 +554,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	 */
 	if (scx_idle_test_and_clear_cpu(prev_cpu)) {
 		cpu = prev_cpu;
-		goto cpu_found;
+		goto out_unlock;
 	}
 
 	/*
@@ -559,7 +563,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	if (llc_cpus) {
 		cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
 		if (cpu >= 0)
-			goto cpu_found;
+			goto out_unlock;
 	}
 
 	/*
@@ -568,7 +572,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	if (numa_cpus) {
 		cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
 		if (cpu >= 0)
-			goto cpu_found;
+			goto out_unlock;
 	}
 
 	/*
@@ -581,13 +585,8 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	 */
 	cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
 	if (cpu >= 0)
-		goto cpu_found;
-
-	cpu = prev_cpu;
-	goto out_unlock;
+		goto out_unlock;
 
-cpu_found:
-	*found = true;
 out_unlock:
 	rcu_read_unlock();
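The last hunk is where the contract change lands: the cpu_found label and the fallback to prev_cpu disappear, every successful step jumps straight to out_unlock with a valid CPU, and exhausting all steps leaves the negative value from the final scx_pick_idle_cpu() call. Schematically (a sketch; the trailing return isn't visible in the hunk but is implied by the new return contract):

	/* Before: always return a CPU, signal success separately. */
	cpu = prev_cpu;
	goto out_unlock;
cpu_found:
	*found = true;
out_unlock:
	rcu_read_unlock();
	return cpu;

	/* After: the return value itself carries success or failure. */
out_unlock:
	rcu_read_unlock();
	return cpu;	/* >= 0: idle CPU picked; negative: none found */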

@@ -819,6 +818,9 @@ __bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 				       u64 wake_flags, bool *is_idle)
 {
+#ifdef CONFIG_SMP
+	s32 cpu;
+#endif
 	if (!ops_cpu_valid(prev_cpu, NULL))
 		goto prev_cpu;
 
@@ -829,7 +831,11 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 		goto prev_cpu;
 
 #ifdef CONFIG_SMP
-	return scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0, is_idle);
+	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+	if (cpu >= 0) {
+		*is_idle = true;
+		return cpu;
+	}
#endif
 
 prev_cpu:
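Note that the BPF-facing contract of scx_bpf_select_cpu_dfl() is unchanged: schedulers still receive the idle status through *is_idle and always get a CPU back. A minimal sketch of a typical ops.select_cpu() callback consuming it (my_select_cpu is illustrative, loosely modeled on the sched_ext example schedulers, and assumes the scx_bpf_dsq_insert() kfunc is available):

s32 BPF_STRUCT_OPS(my_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle)
		/* An idle CPU was picked: dispatch to it directly. */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

	return cpu;
}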

kernel/sched/ext_idle.h

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node
 }
 #endif /* CONFIG_SMP */
 
-s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags, bool *found);
+s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags);
 void scx_idle_enable(struct sched_ext_ops *ops);
 void scx_idle_disable(void);
 int scx_idle_init(void);
