Skip to content

Commit b41bbb3

Browse files
author
Ingo Molnar
committed
Merge branch 'sched/eevdf' into sched/core
Pick up the EEVDF work into the main branch - it's looking good so far.

Conflicts:
	kernel/sched/features.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>
2 parents 88c56cf + d07f09a commit b41bbb3

File tree

7 files changed

+563
-649
lines changed

7 files changed

+563
-649
lines changed

include/linux/rbtree_augmented.h

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,32 @@ rb_insert_augmented_cached(struct rb_node *node,
6060
rb_insert_augmented(node, &root->rb_root, augment);
6161
}
6262

63+
static __always_inline struct rb_node *
64+
rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree,
65+
bool (*less)(struct rb_node *, const struct rb_node *),
66+
const struct rb_augment_callbacks *augment)
67+
{
68+
struct rb_node **link = &tree->rb_root.rb_node;
69+
struct rb_node *parent = NULL;
70+
bool leftmost = true;
71+
72+
while (*link) {
73+
parent = *link;
74+
if (less(node, parent)) {
75+
link = &parent->rb_left;
76+
} else {
77+
link = &parent->rb_right;
78+
leftmost = false;
79+
}
80+
}
81+
82+
rb_link_node(node, parent, link);
83+
augment->propagate(parent, NULL); /* suboptimal */
84+
rb_insert_augmented_cached(node, tree, leftmost, augment);
85+
86+
return leftmost ? node : NULL;
87+
}
88+
6389
/*
6490
* Template for declaring augmented rbtree callbacks (generic case)
6591
*

include/linux/sched.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -549,13 +549,18 @@ struct sched_entity {
549549
/* For load-balancing: */
550550
struct load_weight load;
551551
struct rb_node run_node;
552+
u64 deadline;
553+
u64 min_deadline;
554+
552555
struct list_head group_node;
553556
unsigned int on_rq;
554557

555558
u64 exec_start;
556559
u64 sum_exec_runtime;
557-
u64 vruntime;
558560
u64 prev_sum_exec_runtime;
561+
u64 vruntime;
562+
s64 vlag;
563+
u64 slice;
559564

560565
u64 nr_migrations;
561566

kernel/sched/core.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4527,6 +4527,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
45274527
p->se.prev_sum_exec_runtime = 0;
45284528
p->se.nr_migrations = 0;
45294529
p->se.vruntime = 0;
4530+
p->se.vlag = 0;
4531+
p->se.slice = sysctl_sched_base_slice;
45304532
INIT_LIST_HEAD(&p->se.group_node);
45314533

45324534
#ifdef CONFIG_FAIR_GROUP_SCHED

kernel/sched/debug.c

Lines changed: 22 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -347,10 +347,7 @@ static __init int sched_init_debug(void)
347347
debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
348348
#endif
349349

350-
debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
351-
debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
352-
debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
353-
debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
350+
debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
354351

355352
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
356353
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
@@ -582,9 +579,13 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
582579
else
583580
SEQ_printf(m, " %c", task_state_to_char(p));
584581

585-
SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
582+
SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
586583
p->comm, task_pid_nr(p),
587584
SPLIT_NS(p->se.vruntime),
585+
entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
586+
SPLIT_NS(p->se.deadline),
587+
SPLIT_NS(p->se.slice),
588+
SPLIT_NS(p->se.sum_exec_runtime),
588589
(long long)(p->nvcsw + p->nivcsw),
589590
p->prio);
590591

@@ -627,10 +628,9 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
627628

628629
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
629630
{
630-
s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
631-
spread, rq0_min_vruntime, spread0;
631+
s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, spread;
632+
struct sched_entity *last, *first;
632633
struct rq *rq = cpu_rq(cpu);
633-
struct sched_entity *last;
634634
unsigned long flags;
635635

636636
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -644,26 +644,25 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
644644
SPLIT_NS(cfs_rq->exec_clock));
645645

646646
raw_spin_rq_lock_irqsave(rq, flags);
647-
if (rb_first_cached(&cfs_rq->tasks_timeline))
648-
MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
647+
first = __pick_first_entity(cfs_rq);
648+
if (first)
649+
left_vruntime = first->vruntime;
649650
last = __pick_last_entity(cfs_rq);
650651
if (last)
651-
max_vruntime = last->vruntime;
652+
right_vruntime = last->vruntime;
652653
min_vruntime = cfs_rq->min_vruntime;
653-
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
654654
raw_spin_rq_unlock_irqrestore(rq, flags);
655-
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
656-
SPLIT_NS(MIN_vruntime));
655+
656+
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_vruntime",
657+
SPLIT_NS(left_vruntime));
657658
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
658659
SPLIT_NS(min_vruntime));
659-
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
660-
SPLIT_NS(max_vruntime));
661-
spread = max_vruntime - MIN_vruntime;
662-
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
663-
SPLIT_NS(spread));
664-
spread0 = min_vruntime - rq0_min_vruntime;
665-
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
666-
SPLIT_NS(spread0));
660+
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
661+
SPLIT_NS(avg_vruntime(cfs_rq)));
662+
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
663+
SPLIT_NS(right_vruntime));
664+
spread = right_vruntime - left_vruntime;
665+
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
667666
SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
668667
cfs_rq->nr_spread_over);
669668
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
@@ -864,10 +863,7 @@ static void sched_debug_header(struct seq_file *m)
864863
SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
865864
#define PN(x) \
866865
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
867-
PN(sysctl_sched_latency);
868-
PN(sysctl_sched_min_granularity);
869-
PN(sysctl_sched_idle_min_granularity);
870-
PN(sysctl_sched_wakeup_granularity);
866+
PN(sysctl_sched_base_slice);
871867
P(sysctl_sched_child_runs_first);
872868
P(sysctl_sched_features);
873869
#undef PN

0 commit comments

Comments (0)