
Commit c891e98

Merge tag 'smp-core-2023-10-29-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull SMP and CPU hotplug updates from Thomas Gleixner:

 - Switch the smp_call_function*() @csd argument to the
   call_single_data_t type, a cache-line aligned typedef of the
   underlying struct __call_single_data. The alignment guarantees that
   the call data never crosses a cache-line boundary, which avoids
   bouncing an extra cache line for the SMP function call.

 - Prevent offlining of the last housekeeping CPU when CPU isolation is
   active. Offlining the last housekeeping CPU makes no sense in
   general, and it also caused the scheduler to panic on the empty CPU
   mask when rebuilding the scheduler domains.

 - Remove an unused CPU hotplug state.

* tag 'smp-core-2023-10-29-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpu/hotplug: Don't offline the last non-isolated CPU
  cpu/hotplug: Remove unused cpuhp_state CPUHP_AP_X86_VDSO_VMA_ONLINE
  smp: Change function signatures to use call_single_data_t
2 parents b08ecce + 38685e2 commit c891e98
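
For context, the cache-line guarantee referenced in the first item comes from the typedef itself. Below is a paraphrased sketch of the relevant definitions from include/linux/smp.h, not the verbatim source; the size annotations assume a 64-bit kernel with 64-byte cache lines:

/* Paraphrased sketch of the type this series standardizes on. */
struct __call_single_data {
	struct __call_single_node node;	/* llist hook + flags: 16 bytes */
	smp_call_func_t func;		/* callback:            8 bytes */
	void *info;			/* callback argument:   8 bytes */
};

/*
 * Aligning the typedef to its own size (32 bytes here) means a csd can
 * start only at offset 0 or 32 within a 64-byte cache line, so it never
 * straddles a line boundary and the IPI path touches exactly one line.
 */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));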

File tree

6 files changed, 26 insertions(+), 24 deletions(-)


include/linux/cpuhotplug.h

Lines changed: 0 additions & 1 deletion
@@ -204,7 +204,6 @@ enum cpuhp_state {
 	CPUHP_AP_KVM_ONLINE,
 	CPUHP_AP_SCHED_WAIT_EMPTY,
 	CPUHP_AP_SMPBOOT_THREADS,
-	CPUHP_AP_X86_VDSO_VMA_ONLINE,
 	CPUHP_AP_IRQ_AFFINITY_ONLINE,
 	CPUHP_AP_BLK_MQ_ONLINE,
 	CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,

include/linux/smp.h

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 			   void *info, bool wait, const struct cpumask *mask);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
 /*
  * Cpus stopping functions in panic. All have default weak definitions.
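
A hedged sketch of what a caller looks like with the new signature; the names my_remote_func, my_payload, my_csd, and my_kick_cpu are hypothetical, while call_single_data_t, CSD_INIT(), and smp_call_function_single_async() are the real APIs:

/* Hypothetical example; only the types and API calls are real. */
static void my_remote_func(void *info)
{
	/* Runs on the target CPU from IPI context; keep it short. */
}

static int my_payload;
static call_single_data_t my_csd = CSD_INIT(my_remote_func, &my_payload);

static int my_kick_cpu(int target_cpu)
{
	/*
	 * Returns immediately; my_remote_func() later runs on
	 * target_cpu. The csd must not be reused before the previous
	 * call has completed (-EBUSY is returned while it is locked).
	 */
	return smp_call_function_single_async(target_cpu, &my_csd);
}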

include/trace/events/csd.h

Lines changed: 4 additions & 4 deletions
@@ -12,7 +12,7 @@ TRACE_EVENT(csd_queue_cpu,
 	TP_PROTO(const unsigned int cpu,
 		 unsigned long callsite,
 		 smp_call_func_t func,
-		 struct __call_single_data *csd),
+		 call_single_data_t *csd),
 
 	TP_ARGS(cpu, callsite, func, csd),
 
@@ -39,7 +39,7 @@ TRACE_EVENT(csd_queue_cpu,
  */
 DECLARE_EVENT_CLASS(csd_function,
 
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 
 	TP_ARGS(func, csd),
 
@@ -57,12 +57,12 @@ DECLARE_EVENT_CLASS(csd_function,
 );
 
 DEFINE_EVENT(csd_function, csd_function_entry,
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 	TP_ARGS(func, csd)
 );
 
 DEFINE_EVENT(csd_function, csd_function_exit,
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 	TP_ARGS(func, csd)
 );
 
kernel/cpu.c

Lines changed: 7 additions & 4 deletions
@@ -1515,11 +1515,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
 	/*
 	 * Ensure that the control task does not run on the to be offlined
 	 * CPU to prevent a deadlock against cfs_b->period_timer.
+	 * Also keep at least one housekeeping cpu onlined to avoid generating
+	 * an empty sched_domain span.
 	 */
-	cpu = cpumask_any_but(cpu_online_mask, cpu);
-	if (cpu >= nr_cpu_ids)
-		return -EBUSY;
-	return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
+	for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
+		if (cpu != work.cpu)
+			return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
+	}
+	return -EBUSY;
 }
 
 static int cpu_down(unsigned int cpu, enum cpuhp_state target)
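
Why the loop fixes the reported panic: with an isolation setup such as isolcpus=domain,1-7 on an eight-CPU machine, housekeeping_cpumask(HK_TYPE_DOMAIN) contains only CPU 0, so an attempt to offline CPU 0 now finds no other online housekeeping CPU on which to run __cpu_down_maps_locked() and fails with -EBUSY instead of leaving an empty scheduler-domain span. As a sketch of the iterator's semantics (assuming the usual cpumask helpers; not the literal macro expansion), the loop behaves roughly like:

/* Rough equivalent: walk the set bits of
 * (cpu_online_mask & housekeeping_cpumask(HK_TYPE_DOMAIN)). */
for (cpu = cpumask_first_and(cpu_online_mask,
			     housekeeping_cpumask(HK_TYPE_DOMAIN));
     cpu < nr_cpu_ids;
     cpu = cpumask_next_and(cpu, cpu_online_mask,
			    housekeeping_cpumask(HK_TYPE_DOMAIN))) {
	if (cpu != work.cpu)	/* never run the work on the dying CPU */
		return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
}
return -EBUSY;	/* no other online housekeeping CPU is left */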

kernel/smp.c

Lines changed: 13 additions & 13 deletions
@@ -127,7 +127,7 @@ send_call_function_ipi_mask(struct cpumask *mask)
 }
 
 static __always_inline void
-csd_do_func(smp_call_func_t func, void *info, struct __call_single_data *csd)
+csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
 {
 	trace_csd_function_entry(func, csd);
 	func(info);
@@ -174,7 +174,7 @@ module_param(csd_lock_timeout, ulong, 0444);
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void __csd_lock_record(struct __call_single_data *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
 	if (!csd) {
 		smp_mb(); /* NULL cur_csd after unlock. */
@@ -189,13 +189,13 @@ static void __csd_lock_record(struct __call_single_data *csd)
 	/* Or before unlock, as the case may be. */
 }
 
-static __always_inline void csd_lock_record(struct __call_single_data *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled))
 		__csd_lock_record(csd);
 }
 
-static int csd_lock_wait_getcpu(struct __call_single_data *csd)
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
 	unsigned int csd_type;
 
@@ -210,7 +210,7 @@ static int csd_lock_wait_getcpu(struct __call_single_data *csd)
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
  */
-static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
 	int cpu = -1;
 	int cpux;
@@ -276,7 +276,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void __csd_lock_wait(struct __call_single_data *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
 	int bug_id = 0;
 	u64 ts0, ts1;
@@ -290,7 +290,7 @@ static void __csd_lock_wait(struct __call_single_data *csd)
 	smp_acquire__after_ctrl_dep();
 }
 
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled)) {
 		__csd_lock_wait(csd);
@@ -300,17 +300,17 @@ static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #else
-static void csd_lock_record(struct __call_single_data *csd)
+static void csd_lock_record(call_single_data_t *csd)
 {
 }
 
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #endif
 
-static __always_inline void csd_lock(struct __call_single_data *csd)
+static __always_inline void csd_lock(call_single_data_t *csd)
 {
 	csd_lock_wait(csd);
 	csd->node.u_flags |= CSD_FLAG_LOCK;
@@ -323,7 +323,7 @@ static __always_inline void csd_lock(struct __call_single_data *csd)
 	smp_wmb();
 }
 
-static __always_inline void csd_unlock(struct __call_single_data *csd)
+static __always_inline void csd_unlock(call_single_data_t *csd)
 {
 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
 
@@ -376,7 +376,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static int generic_exec_single(int cpu, struct __call_single_data *csd)
+static int generic_exec_single(int cpu, call_single_data_t *csd)
 {
 	if (cpu == smp_processor_id()) {
 		smp_call_func_t func = csd->func;
@@ -667,7 +667,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  *
  * Return: %0 on success or negative errno value on error
  */
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	int err = 0;
 
kernel/up.c

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	unsigned long flags;
 
