@@ -411,22 +411,26 @@ void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
  *
  * 5. Pick any idle CPU usable by the task.
  *
- * Step 3 and 4 are performed only if the system has, respectively, multiple
- * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
- * scx_selcpu_topo_numa).
+ * Step 3 and 4 are performed only if the system has, respectively,
+ * multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
+ * scx_selcpu_topo_numa) and they don't contain the same subset of CPUs.
+ *
+ * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
+ * begin in @prev_cpu's node and proceed to other nodes in order of
+ * increasing distance.
+ *
+ * Return the picked CPU if idle, or a negative value otherwise.
  *
  * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
  * we never call ops.select_cpu() for them, see select_task_rq().
  */
-s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags, bool *found)
+s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags)
 {
 	const struct cpumask *llc_cpus = NULL;
 	const struct cpumask *numa_cpus = NULL;
 	int node = scx_cpu_node_if_enabled(prev_cpu);
 	s32 cpu;

-	*found = false;
-
 	/*
 	 * This is necessary to protect llc_cpus.
 	 */
@@ -465,7 +469,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		if (cpus_share_cache(cpu, prev_cpu) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
 			cpu = prev_cpu;
-			goto cpu_found;
+			goto out_unlock;
 		}

 		/*
@@ -487,7 +491,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
 		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
 			if (cpumask_test_cpu(cpu, p->cpus_ptr))
-				goto cpu_found;
+				goto out_unlock;
 		}
 	}

@@ -502,7 +506,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	if (cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
 	    scx_idle_test_and_clear_cpu(prev_cpu)) {
 		cpu = prev_cpu;
-		goto cpu_found;
+		goto out_unlock;
 	}

 	/*
@@ -511,7 +515,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	if (llc_cpus) {
 		cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
 		if (cpu >= 0)
-			goto cpu_found;
+			goto out_unlock;
 	}

 	/*
@@ -520,7 +524,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	if (numa_cpus) {
 		cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
 		if (cpu >= 0)
-			goto cpu_found;
+			goto out_unlock;
 	}

 	/*
@@ -533,7 +537,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	 */
 	cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE);
 	if (cpu >= 0)
-		goto cpu_found;
+		goto out_unlock;

 	/*
 	 * Give up if we're strictly looking for a full-idle SMT
@@ -550,7 +554,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	 */
 	if (scx_idle_test_and_clear_cpu(prev_cpu)) {
 		cpu = prev_cpu;
-		goto cpu_found;
+		goto out_unlock;
 	}

 	/*
@@ -559,7 +563,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	if (llc_cpus) {
 		cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
 		if (cpu >= 0)
-			goto cpu_found;
+			goto out_unlock;
 	}

 	/*
@@ -568,7 +572,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	if (numa_cpus) {
 		cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
 		if (cpu >= 0)
-			goto cpu_found;
+			goto out_unlock;
 	}

 	/*
@@ -581,13 +585,8 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	 */
 	cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
 	if (cpu >= 0)
-		goto cpu_found;
-
-	cpu = prev_cpu;
-	goto out_unlock;
+		goto out_unlock;

-cpu_found:
-	*found = true;
 out_unlock:
 	rcu_read_unlock();

@@ -819,6 +818,9 @@ __bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 				       u64 wake_flags, bool *is_idle)
 {
+#ifdef CONFIG_SMP
+	s32 cpu;
+#endif
 	if (!ops_cpu_valid(prev_cpu, NULL))
 		goto prev_cpu;

@@ -829,7 +831,11 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 		goto prev_cpu;

 #ifdef CONFIG_SMP
-	return scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0, is_idle);
+	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+	if (cpu >= 0) {
+		*is_idle = true;
+		return cpu;
+	}
 #endif

 prev_cpu:
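The kfunc's interface toward BPF schedulers is unchanged by this refactor: scx_bpf_select_cpu_dfl() still reports idleness through its bool *is_idle out-parameter, now derived from the cpu >= 0 check above instead of the removed *found plumbing. As a rough usage sketch, an ops.select_cpu callback along the lines of the scx_simple example scheduler (the callback name and the direct-dispatch choice here are illustrative, not part of this commit):

#include <scx/common.bpf.h>

s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	/* Let the built-in policy pick a CPU; falls back to prev_cpu. */
	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle) {
		/*
		 * An idle CPU was claimed: dispatch straight to its local
		 * DSQ so the task runs without a separate enqueue step.
		 */
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	}

	return cpu;
}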