Commit dd5bdaf

Authored and committed by Ingo Molnar
sched/debug: Make CONFIG_SCHED_DEBUG functionality unconditional
All the big Linux distros enable CONFIG_SCHED_DEBUG, because the various features it provides help not just with kernel development, but with system administration and user-space software development as well.

Reflect this reality and enable this functionality unconditionally.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250317104257.3496611-4-mingo@kernel.org
1 parent 57903f7 commit dd5bdaf

File tree: 12 files changed (+9, -108 lines)
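
The diff below applies one mechanical pattern across the touched files: scheduler debug code that used to be compiled only when CONFIG_SCHED_DEBUG was set is now built unconditionally, so the #ifdef guards and their !CONFIG_SCHED_DEBUG stubs are simply deleted. A minimal, self-contained sketch of that pattern follows; it is generic C for illustration only, not kernel code, and the config name CONFIG_FOO_DEBUG and helper print_debug_stats() are hypothetical.

#include <stdio.h>

/*
 * Before a change of this kind, the helper below would have been wrapped
 * in a Kconfig guard, with an empty stub supplied for non-debug builds:
 *
 *     #ifdef CONFIG_FOO_DEBUG
 *     static void print_debug_stats(int cpu) { ... }
 *     #else
 *     static inline void print_debug_stats(int cpu) { }
 *     #endif
 *
 * After the change, the guard and the stub are gone and the real helper
 * is always compiled in.
 */
static void print_debug_stats(int cpu)
{
	printf("cpu%d: debug statistics are always built in\n", cpu);
}

int main(void)
{
	print_debug_stats(0);
	return 0;
}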

fs/proc/base.c
Lines changed: 0 additions & 7 deletions

@@ -1489,7 +1489,6 @@ static const struct file_operations proc_fail_nth_operations = {
 #endif
 
 
-#ifdef CONFIG_SCHED_DEBUG
 /*
  * Print out various scheduling related per-task fields:
  */
@@ -1539,8 +1538,6 @@ static const struct file_operations proc_pid_sched_operations = {
 	.release	= single_release,
 };
 
-#endif
-
 #ifdef CONFIG_SCHED_AUTOGROUP
 /*
  * Print out autogroup related information:
@@ -3331,9 +3328,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 	ONE("status", S_IRUGO, proc_pid_status),
 	ONE("personality", S_IRUSR, proc_pid_personality),
 	ONE("limits", S_IRUGO, proc_pid_limits),
-#ifdef CONFIG_SCHED_DEBUG
 	REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
-#endif
 #ifdef CONFIG_SCHED_AUTOGROUP
 	REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
 #endif
@@ -3682,9 +3677,7 @@ static const struct pid_entry tid_base_stuff[] = {
 	ONE("status", S_IRUGO, proc_pid_status),
 	ONE("personality", S_IRUSR, proc_pid_personality),
 	ONE("limits", S_IRUGO, proc_pid_limits),
-#ifdef CONFIG_SCHED_DEBUG
 	REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
-#endif
 	NOD("comm", S_IFREG|S_IRUGO|S_IWUSR,
 			&proc_tid_comm_inode_operations,
 			&proc_pid_set_comm_operations, {}),

include/linux/energy_model.h
Lines changed: 0 additions & 2 deletions

@@ -240,9 +240,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
 	struct em_perf_state *ps;
 	int i;
 
-#ifdef CONFIG_SCHED_DEBUG
 	WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
-#endif
 
 	if (!sum_util)
 		return 0;

include/linux/sched/debug.h
Lines changed: 0 additions & 2 deletions

@@ -35,12 +35,10 @@ extern void show_stack(struct task_struct *task, unsigned long *sp,
 
 extern void sched_show_task(struct task_struct *p);
 
-#ifdef CONFIG_SCHED_DEBUG
 struct seq_file;
 extern void proc_sched_show_task(struct task_struct *p,
 				 struct pid_namespace *ns, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
-#endif
 
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__section(".sched.text")

include/linux/sched/topology.h
Lines changed: 0 additions & 4 deletions

@@ -25,16 +25,12 @@ enum {
 };
 #undef SD_FLAG
 
-#ifdef CONFIG_SCHED_DEBUG
-
 struct sd_flag_debug {
 	unsigned int meta_flags;
 	char *name;
 };
 extern const struct sd_flag_debug sd_flag_debug[];
 
-#endif
-
 #ifdef CONFIG_SCHED_SMT
 static inline int cpu_smt_flags(void)
 {

include/trace/events/sched.h
Lines changed: 0 additions & 2 deletions

@@ -193,9 +193,7 @@ static inline long __trace_sched_switch_state(bool preempt,
 {
 	unsigned int state;
 
-#ifdef CONFIG_SCHED_DEBUG
 	BUG_ON(p != current);
-#endif /* CONFIG_SCHED_DEBUG */
 
 	/*
 	 * Preemption ignores task state, therefore preempted tasks are always

kernel/sched/build_utility.c
Lines changed: 1 addition & 3 deletions

@@ -68,9 +68,7 @@
 # include "cpufreq_schedutil.c"
 #endif
 
-#ifdef CONFIG_SCHED_DEBUG
-# include "debug.c"
-#endif
+#include "debug.c"
 
 #ifdef CONFIG_SCHEDSTATS
 # include "stats.c"

kernel/sched/core.c
Lines changed: 3 additions & 15 deletions

@@ -118,7 +118,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-#ifdef CONFIG_SCHED_DEBUG
 /*
  * Debugging: various feature bits
  *
@@ -142,7 +141,6 @@ __read_mostly unsigned int sysctl_sched_features =
  */
 __read_mostly int sysctl_resched_latency_warn_ms = 100;
 __read_mostly int sysctl_resched_latency_warn_once = 1;
-#endif /* CONFIG_SCHED_DEBUG */
 
 /*
  * Number of tasks to iterate in a single balance run.
@@ -799,11 +797,10 @@ void update_rq_clock(struct rq *rq)
 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
 		return;
 
-#ifdef CONFIG_SCHED_DEBUG
 	if (sched_feat(WARN_DOUBLE_CLOCK))
 		WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
 	rq->clock_update_flags |= RQCF_UPDATED;
-#endif
+
 	clock = sched_clock_cpu(cpu_of(rq));
 	scx_rq_clock_update(rq, clock);
 
@@ -3291,7 +3288,6 @@ void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
-#ifdef CONFIG_SCHED_DEBUG
 	unsigned int state = READ_ONCE(p->__state);
 
 	/*
@@ -3329,7 +3325,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	WARN_ON_ONCE(!cpu_online(new_cpu));
 
 	WARN_ON_ONCE(is_migration_disabled(p));
-#endif
 
 	trace_sched_migrate_task(p, new_cpu);
 
@@ -5577,7 +5572,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	return ns;
 }
 
-#ifdef CONFIG_SCHED_DEBUG
 static u64 cpu_resched_latency(struct rq *rq)
 {
 	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
@@ -5622,9 +5616,6 @@ static int __init setup_resched_latency_warn_ms(char *str)
 	return 1;
 }
 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
-#else
-static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
-#endif /* CONFIG_SCHED_DEBUG */
 
 /*
  * This function gets called by the timer code, with HZ frequency.
@@ -6718,9 +6709,7 @@ static void __sched notrace __schedule(int sched_mode)
 picked:
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
-#ifdef CONFIG_SCHED_DEBUG
 	rq->last_seen_need_resched_ns = 0;
-#endif
 
 	if (likely(prev != next)) {
 		rq->nr_switches++;
@@ -7094,7 +7083,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
 			  void *key)
 {
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
+	WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
 	return try_to_wake_up(curr->private, mode, wake_flags);
 }
 EXPORT_SYMBOL(default_wake_function);
@@ -7811,10 +7800,9 @@ void show_state_filter(unsigned int state_filter)
 			sched_show_task(p);
 	}
 
-#ifdef CONFIG_SCHED_DEBUG
 	if (!state_filter)
 		sysrq_sched_debug_show();
-#endif
+
 	rcu_read_unlock();
 	/*
 	 * Only show locks if all tasks are dumped:

kernel/sched/deadline.c
Lines changed: 0 additions & 2 deletions

@@ -3574,9 +3574,7 @@ void dl_bw_free(int cpu, u64 dl_bw)
 }
 #endif
 
-#ifdef CONFIG_SCHED_DEBUG
 void print_dl_stats(struct seq_file *m, int cpu)
 {
 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
 }
-#endif /* CONFIG_SCHED_DEBUG */

kernel/sched/fair.c
Lines changed: 0 additions & 4 deletions

@@ -983,7 +983,6 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
 	return best;
 }
 
-#ifdef CONFIG_SCHED_DEBUG
 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
@@ -1010,7 +1009,6 @@ int sched_update_scaling(void)
 	return 0;
 }
 #endif
-#endif
 
 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
 
@@ -13668,7 +13666,6 @@ DEFINE_SCHED_CLASS(fair) = {
 #endif
 };
 
-#ifdef CONFIG_SCHED_DEBUG
 void print_cfs_stats(struct seq_file *m, int cpu)
 {
 	struct cfs_rq *cfs_rq, *pos;
@@ -13702,7 +13699,6 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
 	rcu_read_unlock();
 }
 #endif /* CONFIG_NUMA_BALANCING */
-#endif /* CONFIG_SCHED_DEBUG */
 
 __init void init_sched_fair_class(void)
 {

kernel/sched/rt.c
Lines changed: 1 addition & 4 deletions

@@ -169,9 +169,8 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_SCHED_DEBUG
 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
-#endif
+
 	return container_of(rt_se, struct task_struct, rt);
 }
 
@@ -2969,7 +2968,6 @@ static int sched_rr_handler(const struct ctl_table *table, int write, void *buff
 }
 #endif /* CONFIG_SYSCTL */
 
-#ifdef CONFIG_SCHED_DEBUG
 void print_rt_stats(struct seq_file *m, int cpu)
 {
 	rt_rq_iter_t iter;
@@ -2980,4 +2978,3 @@ void print_rt_stats(struct seq_file *m, int cpu)
 		print_rt_rq(m, cpu, rt_rq);
 	rcu_read_unlock();
 }
-#endif /* CONFIG_SCHED_DEBUG */
