Commit da12301

rcu-tasks: Fix computation of CPU-to-list shift counts
The ->percpu_enqueue_shift field is used to map from the running CPU number to the index of the corresponding callback list. This mapping can change at runtime in response to varying callback load, resulting in varying levels of contention on the callback-list locks.

Unfortunately, the initial value of this field is correct only if the system happens to have a power-of-two number of CPUs. Otherwise, the callbacks from the high-numbered CPUs can be placed into the callback list indexed by 1 (rather than 0), and those index-1 callbacks will be ignored. This can result in soft lockups and hangs.

This commit therefore corrects this mapping, adding one to this shift count as needed for systems having non-power-of-two numbers of CPUs.

Fixes: 7a30871 ("rcu-tasks: Introduce ->percpu_enqueue_shift for dynamic queue selection")
Reported-by: Andrii Nakryiko <andrii.nakryiko@gmail.com>
Reported-by: Martin Lau <kafai@fb.com>
Cc: Neeraj Upadhyay <neeraj.iitr10@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent e783362 commit da12301
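
To make the failure mode concrete, here is a minimal userspace sketch of the arithmetic (hypothetical example values, not kernel code; ilog2() is open-coded because the kernel's log2.h is not available in userspace). On a 6-CPU system with a single callback list, the old shift of ilog2(6) = 2 maps CPUs 4 and 5 to list index 1, which is never scanned while ->percpu_dequeue_lim is 1; the corrected shift of 3 maps every CPU to list 0:

#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2(): floor(log2(v)). */
static int ilog2_user(unsigned int v)
{
        int r = -1;

        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        int nr_cpu_ids = 6;                             /* hypothetical CPU count */
        int old_shift = ilog2_user(nr_cpu_ids);         /* 2: strands CPUs 4 and 5 */
        int new_shift = old_shift + 1;                  /* 3: the committed fix */
        int cpu;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                printf("CPU %d: old list %d, fixed list %d\n",
                       cpu, cpu >> old_shift, cpu >> new_shift);
        /* CPUs 4 and 5 previously landed on list 1, which was ignored. */
        return 0;
}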

1 file changed

kernel/rcu/tasks.h

Lines changed: 8 additions & 4 deletions
@@ -123,7 +123,7 @@ static struct rcu_tasks rt_name = \
        .call_func = call, \
        .rtpcpu = &rt_name ## __percpu, \
        .name = n, \
-       .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \
+       .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS) + 1, \
        .percpu_enqueue_lim = 1, \
        .percpu_dequeue_lim = 1, \
        .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
@@ -216,6 +216,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
        int cpu;
        unsigned long flags;
        int lim;
+       int shift;

        raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
        if (rcu_task_enqueue_lim < 0) {
@@ -229,7 +230,10 @@ static void cblist_init_generic(struct rcu_tasks *rtp)

        if (lim > nr_cpu_ids)
                lim = nr_cpu_ids;
-       WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim));
+       shift = ilog2(nr_cpu_ids / lim);
+       if (((nr_cpu_ids - 1) >> shift) >= lim)
+               shift++;
+       WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
        WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
        smp_store_release(&rtp->percpu_enqueue_lim, lim);
        for_each_possible_cpu(cpu) {
@@ -298,7 +302,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
        if (unlikely(needadjust)) {
                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
-                       WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+                       WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
                        WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
                        smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
                        pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
@@ -413,7 +417,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
        if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                if (rtp->percpu_enqueue_lim > 1) {
-                       WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+                       WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
                        smp_store_release(&rtp->percpu_enqueue_lim, 1);
                        rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
                        pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
