#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

+ static void retrigger_next_event(void *arg);
+
/*
 * The timer bases:
 *
@@ -111,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
-	}
+	},
+	.csd = CSD_INIT(retrigger_next_event, NULL)
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
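The new .csd member statically binds each per-CPU base to retrigger_next_event(), which is why the forward declaration moves up in the first hunk; a later smp_call_function_single_async() on this csd lets an offline CPU ask a remote CPU to reprogram its clockevent without any allocation. A minimal sketch of that general pattern, not part of this patch (the demo_* names are hypothetical):

/* Illustrative sketch of the embedded-CSD pattern, not from the patch. */
#include <linux/smp.h>

struct demo_base {
	unsigned int		cpu;	/* CPU this base belongs to */
	call_single_data_t	csd;	/* pre-initialized IPI descriptor */
};

/* Runs on the target CPU, in IPI context. */
static void demo_handler(void *arg)
{
}

static struct demo_base demo = {
	.csd = CSD_INIT(demo_handler, NULL),
};

/* Kick @cpu asynchronously; no allocation or sleeping involved. */
static void demo_kick(unsigned int cpu)
{
	smp_call_function_single_async(cpu, &demo.csd);
}

Embedding the csd in the per-CPU base keeps the IPI path allocation-free, which matters because the kick is issued from a CPU that is already on its way offline.
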
@@ -124,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_TAI]	= HRTIMER_BASE_TAI,
};

+ static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
+ {
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return true;
+	else
+		return likely(base->online);
+ }
+
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
@@ -145,11 +156,6 @@ static struct hrtimer_cpu_base migration_cpu_base = {

#define migration_base	migration_cpu_base.clock_base[0]

- static inline bool is_migration_base(struct hrtimer_clock_base *base)
- {
-	return base == &migration_base;
- }
-
/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
@@ -183,27 +189,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
}

/*
- * We do not migrate the timer when it is expiring before the next
- * event on the target cpu. When high resolution is enabled, we cannot
- * reprogram the target cpu hardware and we would cause it to fire
- * late. To keep it simple, we handle the high resolution enabled and
- * disabled case similar.
+ * Check if the elected target is suitable considering its next
+ * event and the hotplug state of the current CPU.
+ *
+ * If the elected target is remote and its next event is after the timer
+ * to queue, then a remote reprogram is necessary. However there is no
+ * guarantee the IPI handling the operation would arrive in time to meet
+ * the high resolution deadline. In this case the local CPU becomes a
+ * preferred target, unless it is offline.
+ *
+ * High and low resolution modes are handled the same way for simplicity.
 *
 * Called with cpu_base->lock of target cpu held.
 */
- static int
- hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
+ static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
+				     struct hrtimer_cpu_base *new_cpu_base,
+				     struct hrtimer_cpu_base *this_cpu_base)
{
	ktime_t expires;

+	/*
+	 * The local CPU clockevent can be reprogrammed. Also get_target_base()
+	 * guarantees it is online.
+	 */
+	if (new_cpu_base == this_cpu_base)
+		return true;
+
+	/*
+	 * The offline local CPU can't be the default target if the
+	 * next remote target event is after this timer. Keep the
+	 * elected new base. An IPI will be issued to reprogram
+	 * it as a last resort.
+	 */
+	if (!hrtimer_base_is_online(this_cpu_base))
+		return true;
+
	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
-	return expires < new_base->cpu_base->expires_next;
+
+	return expires >= new_base->cpu_base->expires_next;
}

- static inline
- struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
-					  int pinned)
+ static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned)
{
+	if (!hrtimer_base_is_online(base)) {
+		int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER));
+
+		return &per_cpu(hrtimer_bases, cpu);
+	}
+
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
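Note that the rename also flips the predicate's polarity: hrtimer_check_target() returned true when the remote target had to be rejected, whereas hrtimer_suitable_target() returns true when it may be kept, with two early-accept cases (the target is the local base, or the local CPU is offline and can no longer serve as the fallback) in front. get_target_base() itself now routes callers whose own base is offline to an online housekeeping CPU, so the elected base is always online by the time the check runs. A rough userspace model of the rule, with simplified types and a hypothetical name:

/* Illustrative userspace model of the suitability rule, not from the patch. */
#include <stdbool.h>
#include <stdint.h>

typedef int64_t ktime_t;	/* stand-in for the kernel's ktime_t */

static bool suitable_target(ktime_t expires, ktime_t remote_expires_next,
			    bool target_is_local, bool local_is_online)
{
	/* The local clockevent can always be reprogrammed directly. */
	if (target_is_local)
		return true;

	/* An offline local CPU cannot be used as the fallback target. */
	if (!local_is_online)
		return true;

	/* Keep the remote target only if it needs no earlier reprogram there. */
	return expires >= remote_expires_next;
}
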
@@ -254,8 +287,8 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

-		if (new_cpu_base != this_cpu_base &&
-		    hrtimer_check_target(timer, new_base)) {
+		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
+					     this_cpu_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
@@ -264,8 +297,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		}
		WRITE_ONCE(timer->base, new_base);
	} else {
-		if (new_cpu_base != this_cpu_base &&
-		    hrtimer_check_target(timer, new_base)) {
+		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
@@ -275,11 +307,6 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,

#else /* CONFIG_SMP */

- static inline bool is_migration_base(struct hrtimer_clock_base *base)
- {
-	return false;
- }
-
static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
	__acquires(&timer->base->cpu_base->lock)
@@ -716,8 +743,6 @@ static inline int hrtimer_is_hres_enabled(void)
	return hrtimer_hres_enabled;
}

- static void retrigger_next_event(void *arg);
-
/*
 * Switch to high resolution mode
 */
@@ -1205,6 +1230,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				    u64 delta_ns, const enum hrtimer_mode mode,
				    struct hrtimer_clock_base *base)
{
+	struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	struct hrtimer_clock_base *new_base;
	bool force_local, first;

@@ -1216,9 +1242,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
	 * and enforce reprogramming after it is queued no matter whether
	 * it is the new first expiring timer again or not.
	 */
-	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
+	force_local = base->cpu_base == this_cpu_base;
	force_local &= base->cpu_base->next_timer == timer;

+	/*
+	 * Don't force local queuing if this enqueue happens on an unplugged
+	 * CPU after hrtimer_cpu_dying() has been invoked.
+	 */
+	force_local &= this_cpu_base->online;
+
	/*
	 * Remove an active timer from the queue. In case it is not queued
	 * on the current CPU, make sure that remove_hrtimer() updates the
@@ -1248,8 +1280,27 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
	}

	first = enqueue_hrtimer(timer, new_base, mode);
-	if (!force_local)
-		return first;
+	if (!force_local) {
+		/*
+		 * If the current CPU base is online, then the timer is
+		 * never queued on a remote CPU if it would be the first
+		 * expiring timer there.
+		 */
+		if (hrtimer_base_is_online(this_cpu_base))
+			return first;
+
+		/*
+		 * Timer was enqueued remotely because the current base is
+		 * already offline. If the timer is the first to expire,
+		 * kick the remote CPU to reprogram the clock event.
+		 */
+		if (first) {
+			struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;
+
+			smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
+		}
+		return 0;
+	}

	/*
	 * Timer was forced to stay on the current CPU to avoid
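Together with the csd added to hrtimer_cpu_base above, this closes the hotplug hole: a timer armed from a CPU that has already passed hrtimer_cpu_dying() is queued on an online housekeeping CPU instead, and if it becomes that CPU's first expirer, the queued csd runs retrigger_next_event() there to reprogram its clockevent. A compact model of when the kick is issued, with a hypothetical name and simplified inputs:

/* Illustrative model of the post-enqueue decision, not from the patch. */
#include <stdbool.h>

static bool need_remote_kick(bool force_local, bool local_is_online, bool first)
{
	if (force_local)	/* timer stayed local; the caller reprograms the local clockevent */
		return false;
	if (local_is_online)	/* a remote enqueue from an online CPU is never the first expirer */
		return false;
	return first;		/* offline local CPU: IPI the remote base via its csd */
}
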
@@ -1370,6 +1421,18 @@ static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
	}
}

+ #ifdef CONFIG_SMP
+ static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
+ {
+	return base == &migration_base;
+ }
+ #else
+ static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
+ {
+	return false;
+ }
+ #endif
+
/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was