@@ -2747,6 +2747,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
 {
         struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
         bool prev_on_scx = prev->sched_class == &ext_sched_class;
+        bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
         int nr_loops = SCX_DSP_MAX_LOOPS;
 
         lockdep_assert_rq_held(rq);
@@ -2779,8 +2780,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
          * See scx_ops_disable_workfn() for the explanation on the
          * bypassing test.
          */
-        if ((prev->scx.flags & SCX_TASK_QUEUED) &&
-            prev->scx.slice && !scx_rq_bypassing(rq)) {
+        if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
                 rq->scx.flags |= SCX_RQ_BAL_KEEP;
                 goto has_tasks;
         }
@@ -2813,6 +2813,10 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
 
         flush_dispatch_buf(rq);
 
+        if (prev_on_rq && prev->scx.slice) {
+                rq->scx.flags |= SCX_RQ_BAL_KEEP;
+                goto has_tasks;
+        }
         if (rq->scx.local_dsq.nr)
                 goto has_tasks;
         if (consume_global_dsq(rq))
@@ -2838,8 +2842,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
          * Didn't find another task to run. Keep running @prev unless
          * %SCX_OPS_ENQ_LAST is in effect.
          */
-        if ((prev->scx.flags & SCX_TASK_QUEUED) &&
-            (!static_branch_unlikely(&scx_ops_enq_last) ||
+        if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
              scx_rq_bypassing(rq))) {
                 rq->scx.flags |= SCX_RQ_BAL_KEEP;
                 goto has_tasks;
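
Note: taken together, the balance_one() hunks above route all three "keep running @prev" decisions through the prev_on_rq snapshot taken at the top of the function, before ops.dispatch() can run with the rq lock dropped, instead of re-reading prev->scx.flags at each site, and they add a second early-keep check after the dispatch buffer is flushed. Below is a condensed, standalone model of that decision flow, not kernel code; keep_prev() and struct prev_snapshot are illustrative names, and the stated rationale (keeping the decision consistent across the lock-dropping dispatch path) is an assumption.

#include <stdbool.h>

struct prev_snapshot {
        bool on_rq;              /* SCX_TASK_QUEUED sampled once, up front */
        unsigned long slice;     /* prev->scx.slice as currently observed */
};

/* Would balance_one() set SCX_RQ_BAL_KEEP for @prev? */
static bool keep_prev(struct prev_snapshot prev, bool bypassing,
                      bool enq_last, bool dispatched, bool nothing_else)
{
        /* 1. fast path taken before invoking ops.dispatch() */
        if (!dispatched && prev.on_rq && prev.slice && !bypassing)
                return true;
        /* 2. after flush_dispatch_buf(): the scheduler may have refilled the slice */
        if (dispatched && prev.on_rq && prev.slice)
                return true;
        /* 3. nothing else to run: keep @prev unless SCX_OPS_ENQ_LAST applies */
        if (nothing_else && prev.on_rq && (!enq_last || bypassing))
                return true;
        return false;
}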
@@ -3034,7 +3037,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
                  */
                 if (p->scx.slice && !scx_rq_bypassing(rq)) {
                         dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
-                        return;
+                        goto switch_class;
                 }
 
                 /*
@@ -3051,6 +3054,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
                 }
         }
 
+switch_class:
         if (next && next->sched_class != &ext_sched_class)
                 switch_class(rq, next);
 }
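
Note: the early return in put_prev_task_scx() used to skip the switch_class() call at the bottom of the function. With the goto, the hand-off to the other sched class still happens even when @p kept some slice and was re-queued at the head of the local DSQ, which should also keep the ops.cpu_release() notification reliable. For illustration only, here is a minimal BPF-side consumer of that notification in the style of the in-tree example schedulers; "sketch_cpu_release" is a hypothetical name, and the scx BPF conventions (BPF_STRUCT_OPS, scx_bpf_reenqueue_local()) are assumed from the sched_ext tooling.

void BPF_STRUCT_OPS(sketch_cpu_release, s32 cpu,
                    struct scx_cpu_release_args *args)
{
        /*
         * A higher-priority sched class preempted @cpu. Re-enqueue the
         * tasks still sitting on its local DSQ so the scheduler can place
         * them elsewhere.
         */
        scx_bpf_reenqueue_local();
}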
@@ -3586,16 +3590,8 @@ static void reset_idle_masks(void)
         cpumask_copy(idle_masks.smt, cpu_online_mask);
 }
 
-void __scx_update_idle(struct rq *rq, bool idle)
+static void update_builtin_idle(int cpu, bool idle)
 {
-        int cpu = cpu_of(rq);
-
-        if (SCX_HAS_OP(update_idle) && !scx_rq_bypassing(rq)) {
-                SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
-                if (!static_branch_unlikely(&scx_builtin_idle_enabled))
-                        return;
-        }
-
         if (idle)
                 cpumask_set_cpu(cpu, idle_masks.cpu);
         else
@@ -3622,6 +3618,57 @@ void __scx_update_idle(struct rq *rq, bool idle)
 #endif
 }
 
+/*
+ * Update the idle state of a CPU to @idle.
+ *
+ * If @do_notify is true, ops.update_idle() is invoked to notify the scx
+ * scheduler of an actual idle state transition (idle to busy or vice
+ * versa). If @do_notify is false, only the idle state in the idle masks is
+ * refreshed without invoking ops.update_idle().
+ *
+ * This distinction is necessary, because an idle CPU can be "reserved" and
+ * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
+ * busy even if no tasks are dispatched. In this case, the CPU may return
+ * to idle without a true state transition. Refreshing the idle masks
+ * without invoking ops.update_idle() ensures accurate idle state tracking
+ * while avoiding unnecessary updates and maintaining balanced state
+ * transitions.
+ */
+void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
+{
+        int cpu = cpu_of(rq);
+
+        lockdep_assert_rq_held(rq);
+
+        /*
+         * Trigger ops.update_idle() only when transitioning from a task to
+         * the idle thread and vice versa.
+         *
+         * Idle transitions are indicated by do_notify being set to true,
+         * managed by put_prev_task_idle()/set_next_task_idle().
+         */
+        if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
+                SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
+
+        /*
+         * Update the idle masks:
+         * - for real idle transitions (do_notify == true)
+         * - for idle-to-idle transitions (indicated by the previous task
+         *   being the idle thread, managed by pick_task_idle())
+         *
+         * Skip updating idle masks if the previous task is not the idle
+         * thread, since set_next_task_idle() has already handled it when
+         * transitioning from a task to the idle thread (calling this
+         * function with do_notify == true).
+         *
+         * In this way we can avoid updating the idle masks twice,
+         * unnecessarily.
+         */
+        if (static_branch_likely(&scx_builtin_idle_enabled))
+                if (do_notify || is_idle_task(rq->curr))
+                        update_builtin_idle(cpu, idle);
+}
+
 static void handle_hotplug(struct rq *rq, bool online)
 {
         int cpu = cpu_of(rq);
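
Note: this hunk splits the old combined helper in two. update_builtin_idle() touches only the built-in idle masks, while the new __scx_update_idle() decides, via do_notify, whether ops.update_idle() is also invoked. The callers in the idle sched class are not part of this hunk; based on the comments above, the intended mapping is presumably the following, assuming scx_update_idle() remains the usual scx_enabled()-checking wrapper around __scx_update_idle().

/* set_next_task_idle(): a task hands the CPU over to the idle thread */
scx_update_idle(rq, true, true);

/* put_prev_task_idle(): the CPU leaves the idle thread to run a task */
scx_update_idle(rq, false, true);

/* pick_task_idle(): the idle thread is re-picked (idle-to-idle), so only
 * the idle masks are refreshed, without notifying the BPF scheduler */
scx_update_idle(rq, true, false);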
@@ -4744,10 +4791,9 @@ static void scx_ops_bypass(bool bypass)
          */
         for_each_possible_cpu(cpu) {
                 struct rq *rq = cpu_rq(cpu);
-                struct rq_flags rf;
                 struct task_struct *p, *n;
 
-                rq_lock(rq, &rf);
+                raw_spin_rq_lock(rq);
 
                 if (bypass) {
                         WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
@@ -4763,7 +4809,7 @@ static void scx_ops_bypass(bool bypass)
                  * sees scx_rq_bypassing() before moving tasks to SCX.
                  */
                 if (!scx_enabled()) {
-                        rq_unlock(rq, &rf);
+                        raw_spin_rq_unlock(rq);
                         continue;
                 }
@@ -4783,10 +4829,11 @@ static void scx_ops_bypass(bool bypass)
                         sched_enq_and_set_task(&ctx);
                 }
 
-                rq_unlock(rq, &rf);
-
                 /* resched to restore ticks and idle state */
-                resched_cpu(cpu);
+                if (cpu_online(cpu) || cpu == smp_processor_id())
+                        resched_curr(rq);
+
+                raw_spin_rq_unlock(rq);
         }
 
         atomic_dec(&scx_ops_breather_depth);
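
Note: in scx_ops_bypass(), the rq_flags-based rq_lock()/rq_unlock() pair is replaced with the raw rq spinlock, and the resched now happens while that lock is still held, restricted to CPUs that are online or the local CPU. The guard mirrors what resched_cpu() does internally; presumably the point is to avoid re-taking the rq lock the loop already holds. For context only, resched_cpu() in kernel/sched/core.c looks roughly like the following; this is a close approximation for illustration, not part of this patch.

void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        raw_spin_rq_lock_irqsave(rq, flags);
        if (cpu_online(cpu) || cpu == smp_processor_id())
                resched_curr(rq);
        raw_spin_rq_unlock_irqrestore(rq, flags);
}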