
Commit 62addc6

Changwoo Min authored and htejun committed
sched_ext: Use time helpers in BPF schedulers
Modify the BPF schedulers to use time helpers defined in common.bpf.h.

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Acked-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
1 parent 0f130bc commit 62addc6
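
The per-scheduler vtime_before() copies removed below are replaced by shared time helpers from common.bpf.h (tools/sched_ext/include/scx/). As a minimal sketch only (the header path and exact definitions are assumptions; the authoritative versions live in common.bpf.h), the adopted time_before() presumably keeps the same wrap-safe signed-difference comparison:

/*
 * Sketch only: assumed shape of the shared helper, mirroring the
 * vtime_before() copies removed in this commit. Computing the signed
 * difference keeps the comparison correct even if the u64 clock wraps.
 */
static inline bool time_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}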

File tree

3 files changed: +11 -26 lines changed

  tools/sched_ext/scx_central.bpf.c
  tools/sched_ext/scx_flatcg.bpf.c
  tools/sched_ext/scx_simple.bpf.c


tools/sched_ext/scx_central.bpf.c

Lines changed: 1 addition & 6 deletions

@@ -87,11 +87,6 @@ struct {
 	__type(value, struct central_timer);
 } central_timer SEC(".maps");
 
-static bool vtime_before(u64 a, u64 b)
-{
-	return (s64)(a - b) < 0;
-}
-
 s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
 		   s32 prev_cpu, u64 wake_flags)
 {
@@ -279,7 +274,7 @@ static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
 		/* kick iff the current one exhausted its slice */
 		started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
 		if (started_at && *started_at &&
-		    vtime_before(now, *started_at + slice_ns))
+		    time_before(now, *started_at + slice_ns))
 			continue;
 
 		/* and there's something pending */
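
Why the signed-difference form matters for the slice check above: a raw u64 comparison would treat a clock value that has wrapped past zero as "earlier" than a timestamp taken just before the wrap. A small, self-contained demonstration (illustrative only, plain userspace C rather than BPF, not part of the commit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the removed vtime_before() / adopted time_before(). */
static bool time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t started_at = UINT64_MAX - 5;	/* stamped just before the counter wraps */
	uint64_t now = 10;			/* counter has since wrapped; really 16 ticks later */

	printf("naive now < started_at: %d\n", now < started_at);		/* 1: wrongly "before" */
	printf("wrap-safe time_before : %d\n", time_before(now, started_at));	/* 0: correctly "after" */
	return 0;
}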

tools/sched_ext/scx_flatcg.bpf.c

Lines changed: 8 additions & 13 deletions

@@ -137,11 +137,6 @@ static u64 div_round_up(u64 dividend, u64 divisor)
 	return (dividend + divisor - 1) / divisor;
 }
 
-static bool vtime_before(u64 a, u64 b)
-{
-	return (s64)(a - b) < 0;
-}
-
 static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 {
 	struct cgv_node *cgc_a, *cgc_b;
@@ -271,7 +266,7 @@ static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
 	 */
 	max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
		      (2 * FCG_HWEIGHT_ONE);
-	if (vtime_before(cvtime, cvtime_now - max_budget))
+	if (time_before(cvtime, cvtime_now - max_budget))
 		cvtime = cvtime_now - max_budget;
 
 	cgv_node->cvtime = cvtime;
@@ -401,7 +396,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 		 * Limit the amount of budget that an idling task can accumulate
 		 * to one slice.
 		 */
-		if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
+		if (time_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
 			tvtime = cgc->tvtime_now - SCX_SLICE_DFL;
 
 		scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
@@ -535,7 +530,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
 		 * from multiple CPUs and thus racy. Any error should be
 		 * contained and temporary. Let's just live with it.
 		 */
-		if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
+		if (time_before(cgc->tvtime_now, p->scx.dsq_vtime))
 			cgc->tvtime_now = p->scx.dsq_vtime;
 	}
 	bpf_cgroup_release(cgrp);
@@ -645,7 +640,7 @@ static bool try_pick_next_cgroup(u64 *cgidp)
 	cgv_node = container_of(rb_node, struct cgv_node, rb_node);
 	cgid = cgv_node->cgid;
 
-	if (vtime_before(cvtime_now, cgv_node->cvtime))
+	if (time_before(cvtime_now, cgv_node->cvtime))
 		cvtime_now = cgv_node->cvtime;
 
 	/*
@@ -744,7 +739,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
 	if (!cpuc->cur_cgid)
 		goto pick_next_cgroup;
 
-	if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
+	if (time_before(now, cpuc->cur_at + cgrp_slice_ns)) {
 		if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
 			stat_inc(FCG_STAT_CNS_KEEP);
 			return;
@@ -920,14 +915,14 @@ void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
 		    struct cgroup *from, struct cgroup *to)
 {
 	struct fcg_cgrp_ctx *from_cgc, *to_cgc;
-	s64 vtime_delta;
+	s64 delta;
 
 	/* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
 	if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
 		return;
 
-	vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
-	p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
+	delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now);
+	p->scx.dsq_vtime = to_cgc->tvtime_now + delta;
 }
 
 s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
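
One behavioral nuance in the fcg_cgroup_move() hunk above: the old code took the raw difference p->scx.dsq_vtime - from_cgc->tvtime_now, which can go negative for a task lagging behind its old cgroup's vtime. Assuming time_delta() clamps negative differences to zero (an assumption about common.bpf.h; check the header for the authoritative definition), such a task now joins the destination cgroup at its current vtime rather than in the past. A minimal sketch of the assumed helper:

/* Sketch only: assumed clamping semantics of time_delta() in common.bpf.h. */
static inline s64 time_delta(u64 after, u64 before)
{
	s64 delta = (s64)(after - before);

	return delta > 0 ? delta : 0;
}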

tools/sched_ext/scx_simple.bpf.c

Lines changed: 2 additions & 7 deletions

@@ -52,11 +52,6 @@ static void stat_inc(u32 idx)
 		(*cnt_p)++;
 }
 
-static inline bool vtime_before(u64 a, u64 b)
-{
-	return (s64)(a - b) < 0;
-}
-
 s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
 {
 	bool is_idle = false;
@@ -84,7 +79,7 @@ void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
 	 * Limit the amount of budget that an idling task can accumulate
 	 * to one slice.
 	 */
-	if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
+	if (time_before(vtime, vtime_now - SCX_SLICE_DFL))
 		vtime = vtime_now - SCX_SLICE_DFL;
 
 	scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
@@ -108,7 +103,7 @@ void BPF_STRUCT_OPS(simple_running, struct task_struct *p)
 	 * thus racy. Any error should be contained and temporary. Let's just
 	 * live with it.
 	 */
-	if (vtime_before(vtime_now, p->scx.dsq_vtime))
+	if (time_before(vtime_now, p->scx.dsq_vtime))
 		vtime_now = p->scx.dsq_vtime;
 }
 
