@@ -180,6 +180,26 @@ static ktime_t tick_init_jiffy_update(void)
 	return period;
 }
 
+static inline int tick_sched_flag_test(struct tick_sched *ts,
+				       unsigned long flag)
+{
+	return !!(ts->flags & flag);
+}
+
+static inline void tick_sched_flag_set(struct tick_sched *ts,
+				       unsigned long flag)
+{
+	lockdep_assert_irqs_disabled();
+	ts->flags |= flag;
+}
+
+static inline void tick_sched_flag_clear(struct tick_sched *ts,
+					 unsigned long flag)
+{
+	lockdep_assert_irqs_disabled();
+	ts->flags &= ~flag;
+}
+
 #define MAX_STALLED_JIFFIES 5
 
 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
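The TS_FLAG_* constants these helpers operate on are introduced on the kernel/time/tick-sched.h side of the change, which is not part of this hunk. A minimal sketch of what that header side would look like, with illustrative bit positions (the actual assignments may differ):

```c
/* Sketch only: bit positions are assumptions, not taken from this diff. */
#define TS_FLAG_STOPPED		BIT(0)	/* The idle tick has been stopped */
#define TS_FLAG_INIDLE		BIT(1)	/* The CPU is in the tick idle path */
#define TS_FLAG_DO_TIMER_LAST	BIT(2)	/* CPU was the last one handling jiffies */
#define TS_FLAG_IDLE_ACTIVE	BIT(3)	/* An idle period is being accounted */
```

Two details worth noting: tick_sched_flag_test() double-negates the mask so that multi-bit queries still return 0 or 1, and the set/clear helpers call lockdep_assert_irqs_disabled() because ts->flags is updated with a plain, non-atomic read-modify-write; the per-CPU field is only ever written locally with interrupts off.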
@@ -223,7 +243,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 		}
 	}
 
-	if (ts->inidle)
+	if (tick_sched_flag_test(ts, TS_FLAG_INIDLE))
 		ts->got_idle_tick = 1;
 }
 
@@ -237,7 +257,8 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 	 * idle" jiffy stamp so the idle accounting adjustment we do
 	 * when we go busy again does not account too many ticks.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && ts->tick_stopped) {
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) &&
+	    tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		touch_softlockup_watchdog_sched();
 		if (is_idle_task(current))
 			ts->idle_jiffies++;
@@ -279,7 +300,7 @@ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
 	 * - to the idle task if in dynticks-idle
 	 * - to IRQ exit if in full-dynticks.
 	 */
-	if (unlikely(ts->tick_stopped))
+	if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED)))
 		return HRTIMER_NORESTART;
 
 	hrtimer_forward(timer, now, TICK_NSEC);
@@ -559,7 +580,7 @@ void __tick_nohz_task_switch(void)
 
 	ts = this_cpu_ptr(&tick_cpu_sched);
 
-	if (ts->tick_stopped) {
+	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		if (atomic_read(&current->tick_dep_mask) ||
 		    atomic_read(&current->signal->tick_dep_mask))
 			tick_nohz_full_kick();
@@ -656,14 +677,14 @@ bool tick_nohz_tick_stopped(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
-	return ts->tick_stopped;
+	return tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 }
 
 bool tick_nohz_tick_stopped_cpu(int cpu)
 {
 	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	return ts->tick_stopped;
+	return tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 }
 
 /**
@@ -693,7 +714,7 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
 	ktime_t delta;
 
-	if (WARN_ON_ONCE(!ts->idle_active))
+	if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)))
 		return;
 
 	delta = ktime_sub(now, ts->idle_entrytime);
@@ -705,7 +726,7 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 
 	ts->idle_entrytime = now;
-	ts->idle_active = 0;
+	tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE);
 	write_seqcount_end(&ts->idle_sleeptime_seq);
 
 	sched_clock_idle_wakeup_event();
@@ -715,7 +736,7 @@ static void tick_nohz_start_idle(struct tick_sched *ts)
 {
 	write_seqcount_begin(&ts->idle_sleeptime_seq);
 	ts->idle_entrytime = ktime_get();
-	ts->idle_active = 1;
+	tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE);
 	write_seqcount_end(&ts->idle_sleeptime_seq);
 
 	sched_clock_idle_sleep_event();
@@ -737,7 +758,7 @@ static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
 	do {
 		seq = read_seqcount_begin(&ts->idle_sleeptime_seq);
 
-		if (ts->idle_active && compute_delta) {
+		if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) {
 			ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 
 			idle = ktime_add(*sleeptime, delta);
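Only the head of the retry loop in get_cpu_sleep_time_us() is visible in the hunk above. For orientation, here is a sketch of the complete seqcount read side it belongs to; the else branch and the loop close are reconstructed from the standard read_seqcount_begin()/read_seqcount_retry() pairing rather than shown in this diff:

```c
	do {
		seq = read_seqcount_begin(&ts->idle_sleeptime_seq);

		if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) {
			/* An idle period is in flight: extend it up to 'now'. */
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(*sleeptime, delta);
		} else {
			idle = *sleeptime;
		}
	} while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq));
```

The matching write side appears in tick_nohz_start_idle() and tick_nohz_stop_idle() above, where TS_FLAG_IDLE_ACTIVE is flipped between write_seqcount_begin() and write_seqcount_end() so readers never observe the flag and idle_entrytime out of sync.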
@@ -905,7 +926,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 	 * We've not stopped the tick yet, and there's a timer in the
 	 * next period, so no point in stopping it either, bail.
 	 */
-	if (!ts->tick_stopped) {
+	if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		ts->timer_expires = 0;
 		goto out;
 	}
@@ -918,7 +939,8 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 	 */
 	delta = timekeeping_max_deferment();
 	if (cpu != tick_do_timer_cpu &&
-	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
+	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE ||
+	     !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST)))
 		delta = KTIME_MAX;
 
 	/* Calculate the next expiry time */
@@ -938,7 +960,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 	unsigned long basejiff = ts->last_jiffies;
 	u64 basemono = ts->timer_expires_base;
-	bool timer_idle = ts->tick_stopped;
+	bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 	u64 expires;
 
 	/* Make sure we won't be trying to stop it twice in a row. */
@@ -978,13 +1000,13 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	 */
 	if (cpu == tick_do_timer_cpu) {
 		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-		ts->do_timer_last = 1;
+		tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST);
 	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
-		ts->do_timer_last = 0;
+		tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST);
 	}
 
 	/* Skip reprogram of event if it's not changed */
-	if (ts->tick_stopped && (expires == ts->next_tick)) {
+	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) {
 		/* Sanity check: make sure clockevent is actually programmed */
 		if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
 			return;
@@ -1002,12 +1024,12 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	 * call we save the current tick time, so we can restart the
 	 * scheduler tick in tick_nohz_restart_sched_tick().
 	 */
-	if (!ts->tick_stopped) {
+	if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		calc_load_nohz_start();
 		quiet_vmstat();
 
 		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
-		ts->tick_stopped = 1;
+		tick_sched_flag_set(ts, TS_FLAG_STOPPED);
 		trace_tick_stop(1, TICK_DEP_MASK_NONE);
 	}
 
@@ -1064,7 +1086,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	touch_softlockup_watchdog_sched();
 
 	/* Cancel the scheduled timer and restore the tick: */
-	ts->tick_stopped = 0;
+	tick_sched_flag_clear(ts, TS_FLAG_STOPPED);
 	tick_nohz_restart(ts, now);
 }
 
@@ -1076,7 +1098,7 @@ static void __tick_nohz_full_update_tick(struct tick_sched *ts,
 
 	if (can_stop_full_tick(cpu, ts))
 		tick_nohz_full_stop_tick(ts, cpu);
-	else if (ts->tick_stopped)
+	else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
 		tick_nohz_restart_sched_tick(ts, now);
 #endif
 }
@@ -1196,14 +1218,14 @@ void tick_nohz_idle_stop_tick(void)
 	ts->idle_calls++;
 
 	if (expires > 0LL) {
-		int was_stopped = ts->tick_stopped;
+		int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 
 		tick_nohz_stop_tick(ts, cpu);
 
 		ts->idle_sleeps++;
 		ts->idle_expires = expires;
 
-		if (!was_stopped && ts->tick_stopped) {
+		if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 			ts->idle_jiffies = ts->last_jiffies;
 			nohz_balance_enter_idle(cpu);
 		}
@@ -1234,7 +1256,7 @@ void tick_nohz_idle_enter(void)
 
 	WARN_ON_ONCE(ts->timer_expires_base);
 
-	ts->inidle = 1;
+	tick_sched_flag_set(ts, TS_FLAG_INIDLE);
 	tick_nohz_start_idle(ts);
 
 	local_irq_enable();
@@ -1263,7 +1285,7 @@ void tick_nohz_irq_exit(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
-	if (ts->inidle)
+	if (tick_sched_flag_test(ts, TS_FLAG_INIDLE))
 		tick_nohz_start_idle(ts);
 	else
 		tick_nohz_full_update_tick(ts);
@@ -1317,7 +1339,7 @@ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
 	ktime_t now = ts->idle_entrytime;
 	ktime_t next_event;
 
-	WARN_ON_ONCE(!ts->inidle);
+	WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE));
 
 	*delta_next = ktime_sub(dev->next_event, now);
 
@@ -1389,7 +1411,7 @@ void tick_nohz_idle_restart_tick(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
-	if (ts->tick_stopped) {
+	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 		ktime_t now = ktime_get();
 		tick_nohz_restart_sched_tick(ts, now);
 		tick_nohz_account_idle_time(ts, now);
@@ -1430,12 +1452,12 @@ void tick_nohz_idle_exit(void)
 
 	local_irq_disable();
 
-	WARN_ON_ONCE(!ts->inidle);
+	WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE));
 	WARN_ON_ONCE(ts->timer_expires_base);
 
-	ts->inidle = 0;
-	idle_active = ts->idle_active;
-	tick_stopped = ts->tick_stopped;
+	tick_sched_flag_clear(ts, TS_FLAG_INIDLE);
+	idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE);
+	tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 
 	if (idle_active || tick_stopped)
 		now = ktime_get();
@@ -1498,10 +1520,10 @@ static inline void tick_nohz_irq_enter(void)
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
-	if (!ts->idle_active && !ts->tick_stopped)
+	if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE))
 		return;
 	now = ktime_get();
-	if (ts->idle_active)
+	if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))
 		tick_nohz_stop_idle(ts, now);
 	/*
 	 * If all CPUs are idle we may need to update a stale jiffies value.
@@ -1510,7 +1532,7 @@ static inline void tick_nohz_irq_enter(void)
 	 * rare case (typically stop machine). So we must make sure we have a
 	 * last resort.
 	 */
-	if (ts->tick_stopped)
+	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
 		tick_nohz_update_jiffies(now);
 }
 
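One subtlety in the tick_nohz_irq_enter() hunk above: the two separate boolean checks collapse into a single mask test, which only works because the former bitfields now share one flags word. The identity being relied on is !(flags & (A | B)) == (!(flags & A) && !(flags & B)). A self-contained user-space check (hypothetical bit values, purely to demonstrate the equivalence):

```c
#include <assert.h>
#include <stdio.h>

/* Illustrative bit positions; the real values live in tick-sched.h. */
#define TS_FLAG_STOPPED		(1UL << 0)
#define TS_FLAG_IDLE_ACTIVE	(1UL << 3)

int main(void)
{
	/* Walk every combination of the low flag bits. */
	for (unsigned long flags = 0; flags < 16; flags++) {
		int separate = !(flags & TS_FLAG_STOPPED) &&
			       !(flags & TS_FLAG_IDLE_ACTIVE);
		int combined = !(flags & (TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE));

		assert(separate == combined);
	}
	printf("mask test matches the two separate tests\n");
	return 0;
}
```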