Skip to content

Commit b324696

Browse files
committed
Merge tag 'csd-lock.2023.07.15a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull CSD lock updates from Paul McKenney:

 "This series reduces the number of stack traces dumped during CSD-lock
  debugging. This helps to avoid console overrun on systems with large
  numbers of CPUs."

* tag 'csd-lock.2023.07.15a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  smp: Reduce NMI traffic from CSD waiters to CSD destination
  smp: Reduce logging due to dump_stack of CSD waiters
2 parents 6ae0c15 + 0d3a00b commit b324696

File tree

1 file changed

+11
-2
lines changed

1 file changed

+11
-2
lines changed

kernel/smp.c

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@ static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
4646

4747
static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
4848

49+
static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);
50+
4951
static void __flush_smp_call_function_queue(bool warn_cpu_offline);
5052

5153
int smpcfd_prepare_cpu(unsigned int cpu)
@@ -253,13 +255,15 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
253255
*bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
254256
}
255257
if (cpu >= 0) {
256-
dump_cpu_task(cpu);
258+
if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
259+
dump_cpu_task(cpu);
257260
if (!cpu_cur_csd) {
258261
pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
259262
arch_send_call_function_single_ipi(cpu);
260263
}
261264
}
262-
dump_stack();
265+
if (firsttime)
266+
dump_stack();
263267
*ts1 = ts2;
264268

265269
return false;
@@ -433,9 +437,14 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
433437
struct llist_node *entry, *prev;
434438
struct llist_head *head;
435439
static bool warned;
440+
atomic_t *tbt;
436441

437442
lockdep_assert_irqs_disabled();
438443

444+
/* Allow waiters to send backtrace NMI from here onwards */
445+
tbt = this_cpu_ptr(&trigger_backtrace);
446+
atomic_set_release(tbt, 1);
447+
439448
head = this_cpu_ptr(&call_single_queue);
440449
entry = llist_del_all(head);
441450
entry = llist_reverse_order(entry);

0 commit comments

Comments
 (0)