Commit a478ffb

Frederic Weisbecker authored and Thomas Gleixner (KAGA-KOKO) committed
tick: Move individual bit features to debuggable mask accesses
The individual bitfields of struct tick_sched must be modified from IRQs-disabled places, otherwise local modifications can race due to them sharing the same memory storage.

The recent move of the "got_idle_tick" bitfield to its own storage shows that the use of these bitfields, as pretty as they look, can be just as error prone.

In order to avoid future issues of this kind, and to make sure that those bitfields are safely accessed, move those flags to an explicit mask along with mutator functions performing the basic IRQs-disabled sanity check.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240225225508.11587-13-frederic@kernel.org
1 parent 3ce74f1 commit a478ffb

File tree

3 files changed: 73 insertions(+), 43 deletions(-)
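The race described in the commit message is a classic lost update: all four one-bit fields share a single storage word, so every assignment is a read-modify-write of that whole word. A minimal sketch of the hazard (illustration only; the struct and function names are made up, none of this is kernel code):

/*
 * Plain C bitfields: 'a' and 'b' share one storage word.
 *
 * 'f->a = 1' compiles to roughly:
 *	word = load(f);		<- an interrupt firing here that sets f->b
 *	word |= 0x1;		   modifies memory, but this path still
 *	store(f, word);		<- holds the stale word: the 'b' update is lost
 */
struct two_flags {
	unsigned int a : 1;
	unsigned int b : 1;
};

void set_a(struct two_flags *f)
{
	f->a = 1;
}

Keeping IRQs disabled across every modification, which the new tick_sched_flag_set()/tick_sched_flag_clear() helpers below assert via lockdep, closes that window for same-CPU races.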

kernel/time/tick-sched.c

Lines changed: 55 additions & 33 deletions
@@ -180,6 +180,26 @@ static ktime_t tick_init_jiffy_update(void)
 	return period;
 }
 
+static inline int tick_sched_flag_test(struct tick_sched *ts,
+				       unsigned long flag)
+{
+	return !!(ts->flags & flag);
+}
+
+static inline void tick_sched_flag_set(struct tick_sched *ts,
+				       unsigned long flag)
+{
+	lockdep_assert_irqs_disabled();
+	ts->flags |= flag;
+}
+
+static inline void tick_sched_flag_clear(struct tick_sched *ts,
+					 unsigned long flag)
+{
+	lockdep_assert_irqs_disabled();
+	ts->flags &= ~flag;
+}
+
 #define MAX_STALLED_JIFFIES 5
 
 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
@@ -223,7 +243,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 		}
 	}
 
-	if (ts->inidle)
+	if (tick_sched_flag_test(ts, TS_FLAG_INIDLE))
 		ts->got_idle_tick = 1;
 }
 
@@ -237,7 +257,8 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 	 * idle" jiffy stamp so the idle accounting adjustment we do
 	 * when we go busy again does not account too many ticks.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && ts->tick_stopped) {
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) &&
+	    tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		touch_softlockup_watchdog_sched();
 		if (is_idle_task(current))
 			ts->idle_jiffies++;
@@ -279,7 +300,7 @@ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
 	 * - to the idle task if in dynticks-idle
 	 * - to IRQ exit if in full-dynticks.
 	 */
-	if (unlikely(ts->tick_stopped))
+	if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED)))
 		return HRTIMER_NORESTART;
 
 	hrtimer_forward(timer, now, TICK_NSEC);
@@ -559,7 +580,7 @@ void __tick_nohz_task_switch(void)
 
 	ts = this_cpu_ptr(&tick_cpu_sched);
 
-	if (ts->tick_stopped) {
+	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		if (atomic_read(&current->tick_dep_mask) ||
 		    atomic_read(&current->signal->tick_dep_mask))
 			tick_nohz_full_kick();
@@ -656,14 +677,14 @@ bool tick_nohz_tick_stopped(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
-	return ts->tick_stopped;
+	return tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 }
 
 bool tick_nohz_tick_stopped_cpu(int cpu)
 {
 	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	return ts->tick_stopped;
+	return tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 }
 
 /**
@@ -693,7 +714,7 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
 	ktime_t delta;
 
-	if (WARN_ON_ONCE(!ts->idle_active))
+	if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)))
 		return;
 
 	delta = ktime_sub(now, ts->idle_entrytime);
@@ -705,7 +726,7 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 
 	ts->idle_entrytime = now;
-	ts->idle_active = 0;
+	tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE);
 	write_seqcount_end(&ts->idle_sleeptime_seq);
 
 	sched_clock_idle_wakeup_event();
@@ -715,7 +736,7 @@ static void tick_nohz_start_idle(struct tick_sched *ts)
 {
 	write_seqcount_begin(&ts->idle_sleeptime_seq);
 	ts->idle_entrytime = ktime_get();
-	ts->idle_active = 1;
+	tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE);
 	write_seqcount_end(&ts->idle_sleeptime_seq);
 
 	sched_clock_idle_sleep_event();
@@ -737,7 +758,7 @@ static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
 	do {
 		seq = read_seqcount_begin(&ts->idle_sleeptime_seq);
 
-		if (ts->idle_active && compute_delta) {
+		if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) {
 			ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 
 			idle = ktime_add(*sleeptime, delta);
@@ -905,7 +926,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 		 * We've not stopped the tick yet, and there's a timer in the
 		 * next period, so no point in stopping it either, bail.
 		 */
-		if (!ts->tick_stopped) {
+		if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 			ts->timer_expires = 0;
 			goto out;
 		}
@@ -918,7 +939,8 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 	 */
 	delta = timekeeping_max_deferment();
 	if (cpu != tick_do_timer_cpu &&
-	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
+	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE ||
+	     !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST)))
 		delta = KTIME_MAX;
 
 	/* Calculate the next expiry time */
@@ -938,7 +960,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 	unsigned long basejiff = ts->last_jiffies;
 	u64 basemono = ts->timer_expires_base;
-	bool timer_idle = ts->tick_stopped;
+	bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 	u64 expires;
 
 	/* Make sure we won't be trying to stop it twice in a row. */
@@ -978,13 +1000,13 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	 */
 	if (cpu == tick_do_timer_cpu) {
 		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-		ts->do_timer_last = 1;
+		tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST);
 	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
-		ts->do_timer_last = 0;
+		tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST);
 	}
 
 	/* Skip reprogram of event if it's not changed */
-	if (ts->tick_stopped && (expires == ts->next_tick)) {
+	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) {
 		/* Sanity check: make sure clockevent is actually programmed */
 		if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
 			return;
@@ -1002,12 +1024,12 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	 * call we save the current tick time, so we can restart the
 	 * scheduler tick in tick_nohz_restart_sched_tick().
 	 */
-	if (!ts->tick_stopped) {
+	if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		calc_load_nohz_start();
 		quiet_vmstat();
 
 		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
-		ts->tick_stopped = 1;
+		tick_sched_flag_set(ts, TS_FLAG_STOPPED);
 		trace_tick_stop(1, TICK_DEP_MASK_NONE);
 	}
 
@@ -1064,7 +1086,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	touch_softlockup_watchdog_sched();
 
 	/* Cancel the scheduled timer and restore the tick: */
-	ts->tick_stopped = 0;
+	tick_sched_flag_clear(ts, TS_FLAG_STOPPED);
 	tick_nohz_restart(ts, now);
 }
 
@@ -1076,7 +1098,7 @@ static void __tick_nohz_full_update_tick(struct tick_sched *ts,
 
 	if (can_stop_full_tick(cpu, ts))
 		tick_nohz_full_stop_tick(ts, cpu);
-	else if (ts->tick_stopped)
+	else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
 		tick_nohz_restart_sched_tick(ts, now);
 #endif
 }
@@ -1196,14 +1218,14 @@ void tick_nohz_idle_stop_tick(void)
 	ts->idle_calls++;
 
 	if (expires > 0LL) {
-		int was_stopped = ts->tick_stopped;
+		int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 
 		tick_nohz_stop_tick(ts, cpu);
 
 		ts->idle_sleeps++;
 		ts->idle_expires = expires;
 
-		if (!was_stopped && ts->tick_stopped) {
+		if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 			ts->idle_jiffies = ts->last_jiffies;
 			nohz_balance_enter_idle(cpu);
 		}
@@ -1234,7 +1256,7 @@ void tick_nohz_idle_enter(void)
 
 	WARN_ON_ONCE(ts->timer_expires_base);
 
-	ts->inidle = 1;
+	tick_sched_flag_set(ts, TS_FLAG_INIDLE);
 	tick_nohz_start_idle(ts);
 
 	local_irq_enable();
@@ -1263,7 +1285,7 @@ void tick_nohz_irq_exit(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
-	if (ts->inidle)
+	if (tick_sched_flag_test(ts, TS_FLAG_INIDLE))
 		tick_nohz_start_idle(ts);
 	else
 		tick_nohz_full_update_tick(ts);
@@ -1317,7 +1339,7 @@ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
 	ktime_t now = ts->idle_entrytime;
 	ktime_t next_event;
 
-	WARN_ON_ONCE(!ts->inidle);
+	WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE));
 
 	*delta_next = ktime_sub(dev->next_event, now);
 
@@ -1389,7 +1411,7 @@ void tick_nohz_idle_restart_tick(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
-	if (ts->tick_stopped) {
+	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		ktime_t now = ktime_get();
 		tick_nohz_restart_sched_tick(ts, now);
 		tick_nohz_account_idle_time(ts, now);
@@ -1430,12 +1452,12 @@ void tick_nohz_idle_exit(void)
 
 	local_irq_disable();
 
-	WARN_ON_ONCE(!ts->inidle);
+	WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE));
 	WARN_ON_ONCE(ts->timer_expires_base);
 
-	ts->inidle = 0;
-	idle_active = ts->idle_active;
-	tick_stopped = ts->tick_stopped;
+	tick_sched_flag_clear(ts, TS_FLAG_INIDLE);
+	idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE);
+	tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 
 	if (idle_active || tick_stopped)
 		now = ktime_get();
@@ -1498,10 +1520,10 @@ static inline void tick_nohz_irq_enter(void)
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
-	if (!ts->idle_active && !ts->tick_stopped)
+	if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE))
 		return;
 	now = ktime_get();
-	if (ts->idle_active)
+	if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))
 		tick_nohz_stop_idle(ts, now);
 	/*
 	 * If all CPUs are idle we may need to update a stale jiffies value.
@@ -1510,7 +1532,7 @@ static inline void tick_nohz_irq_enter(void)
 	 * rare case (typically stop machine). So we must make sure we have a
 	 * last resort.
 	 */
-	if (ts->tick_stopped)
+	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
 		tick_nohz_update_jiffies(now);
 }
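Two details of the helpers above are worth noting. First, tick_sched_flag_test() takes a mask rather than a single bit and returns nonzero if any bit in the mask is set, which is what lets tick_nohz_irq_enter() fold its two checks into one call:

	/* !(stopped || idle_active) == !stopped && !idle_active */
	if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE))
		return;

Second, only the mutators carry lockdep_assert_irqs_disabled(): a plain read of an independent bit is tolerably racy, while a read-modify-write of the shared word is not.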

kernel/time/tick-sched.h

Lines changed: 14 additions & 9 deletions
@@ -20,14 +20,22 @@ enum tick_nohz_mode {
 	NOHZ_MODE_HIGHRES,
 };
 
+/* The CPU is in the tick idle mode */
+#define TS_FLAG_INIDLE		BIT(0)
+/* The idle tick has been stopped */
+#define TS_FLAG_STOPPED		BIT(1)
+/*
+ * Indicator that the CPU is actively in the tick idle mode;
+ * it is reset during irq handling phases.
+ */
+#define TS_FLAG_IDLE_ACTIVE	BIT(2)
+/* CPU was the last one doing do_timer before going idle */
+#define TS_FLAG_DO_TIMER_LAST	BIT(3)
+
 /**
  * struct tick_sched - sched tick emulation and no idle tick control/stats
  *
- * @inidle:		Indicator that the CPU is in the tick idle mode
- * @tick_stopped:	Indicator that the idle tick has been stopped
- * @idle_active:	Indicator that the CPU is actively in the tick idle mode;
- *			it is reset during irq handling phases.
- * @do_timer_last:	CPU was the last one doing do_timer before going idle
+ * @flags:		State flags gathering the TS_FLAG_* features
 * @got_idle_tick:	Tick timer function has run with @inidle set
 * @stalled_jiffies:	Number of stalled jiffies detected across ticks
 * @last_tick_jiffies:	Value of jiffies seen on last tick
@@ -57,10 +65,7 @@ enum tick_nohz_mode {
  */
 struct tick_sched {
 	/* Common flags */
-	unsigned int			inidle		: 1;
-	unsigned int			tick_stopped	: 1;
-	unsigned int			idle_active	: 1;
-	unsigned int			do_timer_last	: 1;
+	unsigned long			flags;
 
 	/* Tick handling: jiffies stall check */
 	unsigned int			stalled_jiffies;
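BIT(n) comes from include/linux/bits.h and expands to an unsigned long with only bit n set, so the four flags sit in the low bits of the new @flags word and the mask can grow without touching the accessors. A sketch of how a follow-up could extend it (TS_FLAG_NOHZ is a hypothetical name, not part of this commit):

/* hypothetical additional feature flag, defined next to the others */
#define TS_FLAG_NOHZ		BIT(4)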

kernel/time/timer_list.c

Lines changed: 4 additions & 1 deletion
@@ -147,11 +147,14 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
 # define P_ns(x) \
 	SEQ_printf(m, " .%-15s: %Lu nsecs\n", #x, \
 		   (unsigned long long)(ktime_to_ns(ts->x)))
+# define P_flag(x, f) \
+	SEQ_printf(m, " .%-15s: %d\n", #x, !!(ts->flags & (f)))
+
 	{
 		struct tick_sched *ts = tick_get_tick_sched(cpu);
 		P(nohz_mode);
 		P_ns(last_tick);
-		P(tick_stopped);
+		P_flag(tick_stopped, TS_FLAG_STOPPED);
 		P(idle_jiffies);
 		P(idle_calls);
 		P(idle_sleeps);
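The new P_flag() macro preserves the external format of the old P(tick_stopped) line in /proc/timer_list while reading the bit out of the mask; given the " .%-15s: %d" format, the emitted line should look roughly like this (value depending on the CPU's state at read time):

 .tick_stopped   : 1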
