Commit 3a9910b

Changwoo Min authored and Tejun Heo committed
sched_ext: Implement scx_bpf_now()
Returns a high-performance monotonically non-decreasing clock for the current CPU. The clock returned is in nanoseconds.

It provides the following properties:

1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently to account for execution time and track tasks' runtime properties. Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which eventually reads a hardware timestamp counter -- is neither performant nor scalable. scx_bpf_now() aims to provide a high-performance clock by using the rq clock in the scheduler core whenever possible.

2) High enough resolution for the BPF scheduler use cases: In most BPF scheduler use cases, the required clock resolution is lower than the most accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically uses the rq clock in the scheduler core whenever it is valid. It considers that the rq clock is valid from the time the rq clock is updated (update_rq_clock) until the rq is unlocked (rq_unpin_lock).

3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now() guarantees the clock never goes backward when comparing them in the same CPU. On the other hand, when comparing clocks in different CPUs, there is no such guarantee -- the clock can go backward. It provides a monotonically *non-decreasing* clock so that it would provide the same clock values in two different scx_bpf_now() calls in the same CPU during the same period when the rq clock is valid.

An rq clock becomes valid when it is updated using update_rq_clock() and invalidated when the rq is unlocked using rq_unpin_lock(). Let's suppose the following timeline in the scheduler core:

  T1. rq_lock(rq)
  T2. update_rq_clock(rq)
  T3. a sched_ext BPF operation
  T4. rq_unlock(rq)
  T5. a sched_ext BPF operation
  T6. rq_lock(rq)
  T7. update_rq_clock(rq)

For [T2, T4), we consider that the rq clock is valid (SCX_RQ_CLK_VALID is set), so scx_bpf_now() calls during [T2, T4) (including T3) will return the rq clock updated at T2. For duration [T4, T7), when a BPF scheduler can still call scx_bpf_now() (T5), we consider the rq clock invalid (SCX_RQ_CLK_VALID is unset at T4). So when calling scx_bpf_now() at T5, we will return a fresh clock value by calling sched_clock_cpu() internally.

Also, to prevent getting outdated rq clocks from a previous scx scheduler, invalidate all the rq clocks when unloading a BPF scheduler.

One example of calling scx_bpf_now() when the rq clock is invalid (like T5) is in scx_central [1]. The scx_central scheduler uses a BPF timer for preemptive scheduling. Every msec, the timer callback checks if the currently running tasks exceed their timeslice. At the beginning of the BPF timer callback (central_timerfn in scx_central.bpf.c), scx_central gets the current time. When the BPF timer callback runs, the rq clock could be invalid, the same as T5. In this case, scx_bpf_now() returns a fresh clock value rather than returning the old one (T2).

[1] https://github.com/sched-ext/scx/blob/main/scheds/c/scx_central.bpf.c

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
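To make the scx_central-style usage above concrete, the sketch below shows one way a BPF scheduler might combine the two call sites: stamping a task with scx_bpf_now() from ops.running (where the rq clock is valid) and checking the stamp from a BPF timer callback (where it may not be, as at T5). This is a minimal illustration rather than the actual scx_central code; the task_ctx map, the started_at field, the 20 ms slice, and the function names are assumptions, and it presumes the scx headers (scx/common.bpf.h) declare the scx_bpf_now() kfunc.

/*
 * Minimal sketch (not the actual scx_central code): stamp a task when it
 * starts running and, from a BPF timer callback, check whether its
 * (assumed) 20 ms slice has expired.
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#define SLICE_NS        (20ULL * 1000 * 1000)  /* assumed timeslice */

struct task_ctx {
        u64 started_at;         /* scx_bpf_now() at the last ops.running */
};

struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct task_ctx);
} task_ctxs SEC(".maps");

void BPF_STRUCT_OPS(sketch_running, struct task_struct *p)
{
        struct task_ctx *tctx;

        tctx = bpf_task_storage_get(&task_ctxs, p, 0,
                                    BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (tctx)
                /* rq clock is valid here, so this returns the cached rq clock */
                tctx->started_at = scx_bpf_now();
}

/*
 * Called from a BPF timer callback (like T5 above), where the rq clock may
 * be invalid; scx_bpf_now() then falls back to a fresh clock internally.
 */
static bool slice_expired(struct task_struct *p)
{
        struct task_ctx *tctx = bpf_task_storage_get(&task_ctxs, p, 0, 0);

        return tctx && scx_bpf_now() - tctx->started_at >= SLICE_NS;
}

Note that the timer callback may run on a different CPU than the one that stamped started_at; the non-decreasing guarantee only holds per CPU, so a robust scheduler would tolerate small negative deltas.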
1 parent ea9b262 commit 3a9910b

File tree

3 files changed (+101, -4 lines)


kernel/sched/core.c

Lines changed: 5 additions & 1 deletion
@@ -789,6 +789,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 void update_rq_clock(struct rq *rq)
 {
         s64 delta;
+        u64 clock;

         lockdep_assert_rq_held(rq);

@@ -800,11 +801,14 @@ void update_rq_clock(struct rq *rq)
         SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
         rq->clock_update_flags |= RQCF_UPDATED;
 #endif
+        clock = sched_clock_cpu(cpu_of(rq));
+        scx_rq_clock_update(rq, clock);

-        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+        delta = clock - rq->clock;
         if (delta < 0)
                 return;
         rq->clock += delta;
+
         update_rq_clock_task(rq, delta);
 }

kernel/sched/ext.c

Lines changed: 73 additions & 1 deletion
@@ -4911,7 +4911,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
         struct task_struct *p;
         struct rhashtable_iter rht_iter;
         struct scx_dispatch_q *dsq;
-        int i, kind;
+        int i, kind, cpu;

         kind = atomic_read(&scx_exit_kind);
         while (true) {
@@ -4994,6 +4994,15 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
         scx_task_iter_stop(&sti);
         percpu_up_write(&scx_fork_rwsem);

+        /*
+         * Invalidate all the rq clocks to prevent getting outdated
+         * rq clocks from a previous scx scheduler.
+         */
+        for_each_possible_cpu(cpu) {
+                struct rq *rq = cpu_rq(cpu);
+                scx_rq_clock_invalidate(rq);
+        }
+
         /* no task is on scx, turn off all the switches and flush in-progress calls */
         static_branch_disable(&__scx_ops_enabled);
         for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
@@ -7599,6 +7608,68 @@ __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
 }
 #endif

+/**
+ * scx_bpf_now - Returns a high-performance monotonically non-decreasing
+ * clock for the current CPU. The clock returned is in nanoseconds.
+ *
+ * It provides the following properties:
+ *
+ * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
+ * to account for execution time and track tasks' runtime properties.
+ * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
+ * eventually reads a hardware timestamp counter -- is neither performant nor
+ * scalable. scx_bpf_now() aims to provide a high-performance clock by
+ * using the rq clock in the scheduler core whenever possible.
+ *
+ * 2) High enough resolution for the BPF scheduler use cases: In most BPF
+ * scheduler use cases, the required clock resolution is lower than the most
+ * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
+ * uses the rq clock in the scheduler core whenever it is valid. It considers
+ * that the rq clock is valid from the time the rq clock is updated
+ * (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
+ *
+ * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
+ * guarantees the clock never goes backward when comparing them in the same
+ * CPU. On the other hand, when comparing clocks in different CPUs, there
+ * is no such guarantee -- the clock can go backward. It provides a
+ * monotonically *non-decreasing* clock so that it would provide the same
+ * clock values in two different scx_bpf_now() calls in the same CPU
+ * during the same period of when the rq clock is valid.
+ */
+__bpf_kfunc u64 scx_bpf_now(void)
+{
+        struct rq *rq;
+        u64 clock;
+
+        preempt_disable();
+
+        rq = this_rq();
+        if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
+                /*
+                 * If the rq clock is valid, use the cached rq clock.
+                 *
+                 * Note that scx_bpf_now() is re-entrant between a process
+                 * context and an interrupt context (e.g., timer interrupt).
+                 * However, we don't need to consider the race between them
+                 * because such race is not observable from a caller.
+                 */
+                clock = READ_ONCE(rq->scx.clock);
+        } else {
+                /*
+                 * Otherwise, return a fresh rq clock.
+                 *
+                 * The rq clock is updated outside of the rq lock.
+                 * In this case, keep the updated rq clock invalid so the next
+                 * kfunc call outside the rq lock gets a fresh rq clock.
+                 */
+                clock = sched_clock_cpu(cpu_of(rq));
+        }
+
+        preempt_enable();
+
+        return clock;
+}
+
 __bpf_kfunc_end_defs();

 BTF_KFUNCS_START(scx_kfunc_ids_any)
@@ -7630,6 +7701,7 @@ BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
 #ifdef CONFIG_CGROUP_SCHED
 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
 #endif
+BTF_ID_FLAGS(func, scx_bpf_now)
 BTF_KFUNCS_END(scx_kfunc_ids_any)

 static const struct btf_kfunc_id_set scx_kfunc_set_any = {

kernel/sched/sched.h

Lines changed: 23 additions & 2 deletions
@@ -754,6 +754,7 @@ enum scx_rq_flags {
         SCX_RQ_BAL_PENDING      = 1 << 2, /* balance hasn't run yet */
         SCX_RQ_BAL_KEEP         = 1 << 3, /* balance decided to keep current */
         SCX_RQ_BYPASSING        = 1 << 4,
+        SCX_RQ_CLK_VALID        = 1 << 5, /* RQ clock is fresh and valid */

         SCX_RQ_IN_WAKEUP        = 1 << 16,
         SCX_RQ_IN_BALANCE       = 1 << 17,
@@ -766,9 +767,10 @@ struct scx_rq {
         unsigned long           ops_qseq;
         u64                     extra_enq_flags;        /* see move_task_to_local_dsq() */
         u32                     nr_running;
-        u32                     flags;
         u32                     cpuperf_target;         /* [0, SCHED_CAPACITY_SCALE] */
         bool                    cpu_released;
+        u32                     flags;
+        u64                     clock;                  /* current per-rq clock -- see scx_bpf_now() */
         cpumask_var_t           cpus_to_kick;
         cpumask_var_t           cpus_to_kick_if_idle;
         cpumask_var_t           cpus_to_preempt;
@@ -1725,9 +1727,28 @@ DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */

 #define scx_enabled()           static_branch_unlikely(&__scx_ops_enabled)
 #define scx_switched_all()      static_branch_unlikely(&__scx_switched_all)
+
+static inline void scx_rq_clock_update(struct rq *rq, u64 clock)
+{
+        if (!scx_enabled())
+                return;
+        WRITE_ONCE(rq->scx.clock, clock);
+        smp_store_release(&rq->scx.flags, rq->scx.flags | SCX_RQ_CLK_VALID);
+}
+
+static inline void scx_rq_clock_invalidate(struct rq *rq)
+{
+        if (!scx_enabled())
+                return;
+        WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
+}
+
 #else /* !CONFIG_SCHED_CLASS_EXT */
 #define scx_enabled()           false
 #define scx_switched_all()      false
+
+static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {}
+static inline void scx_rq_clock_invalidate(struct rq *rq) {}
 #endif /* !CONFIG_SCHED_CLASS_EXT */

 /*
@@ -1759,7 +1780,7 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
         if (rq->clock_update_flags > RQCF_ACT_SKIP)
                 rf->clock_update_flags = RQCF_UPDATED;
 #endif
-
+        scx_rq_clock_invalidate(rq);
         lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
 }
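A note on the ordering in the changes above: scx_rq_clock_update() publishes rq->scx.clock with WRITE_ONCE() and then sets SCX_RQ_CLK_VALID with smp_store_release(), while scx_bpf_now() reads the flags with smp_load_acquire() before consuming the clock, so a reader that observes the valid bit also observes the clock written before it. The stand-alone C11 sketch below mirrors that publish/consume pattern with illustrative names; it is not kernel code, and it substitutes an atomic RMW for the lock-protected flag update the kernel uses.

/*
 * Stand-alone sketch in plain C11 atomics (illustrative names, not kernel
 * code): the writer publishes the clock with a relaxed store, then sets the
 * VALID bit with a release RMW; a reader that observes the bit with an
 * acquire load also observes the previously written clock. This mirrors
 * WRITE_ONCE()/smp_store_release() on the update side and
 * smp_load_acquire()/READ_ONCE() on the read side.
 */
#include <stdatomic.h>
#include <stdint.h>

#define CLK_VALID       (1u << 5)

struct fake_rq {
        _Atomic uint64_t clock;
        _Atomic uint32_t flags;
};

static void clock_update(struct fake_rq *rq, uint64_t clock)
{
        atomic_store_explicit(&rq->clock, clock, memory_order_relaxed);
        /* release: orders the clock store before the flag becomes visible */
        atomic_fetch_or_explicit(&rq->flags, CLK_VALID, memory_order_release);
}

static void clock_invalidate(struct fake_rq *rq)
{
        atomic_fetch_and_explicit(&rq->flags, ~CLK_VALID, memory_order_relaxed);
}

static uint64_t clock_read(struct fake_rq *rq, uint64_t (*fresh)(void))
{
        /* acquire: pairs with the release in clock_update() */
        if (atomic_load_explicit(&rq->flags, memory_order_acquire) & CLK_VALID)
                return atomic_load_explicit(&rq->clock, memory_order_relaxed);

        return fresh();         /* fall back to a freshly read clock */
}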
