Commit b913c3f
Frederic Weisbecker authored and fbq committed
rcu/nocb: Make IRQs disablement symmetric
Currently IRQs are disabled on call_rcu() and then re-enabled depending on the context:

* If the CPU is in nocb mode:
  - If the callback is enqueued in the bypass list, IRQs are re-enabled implicitly by rcu_nocb_try_bypass().
  - If the callback is enqueued in the normal list, IRQs are re-enabled implicitly by __call_rcu_nocb_wake().
* If the CPU is NOT in nocb mode, IRQs are re-enabled explicitly from call_rcu().

This makes the code a bit hard to follow, especially as it interleaves with nocb locking. To make the IRQ flags coverage clearer, and to prepare for moving all the nocb enqueue code into its own function, always re-enable the IRQ flags explicitly from call_rcu().

Reviewed-by: Neeraj Upadhyay (AMD) <neeraj.iitr10@gmail.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
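To illustrate the shape of the change, here is a minimal standalone C sketch (not kernel code: local_irq_save()/local_irq_restore() are stubbed as plain functions, and try_bypass_old()/try_bypass_new() are hypothetical stand-ins for rcu_nocb_try_bypass() before and after the patch). It contrasts the old asymmetric flow, where a callee sometimes restored the flags, with the new symmetric one, where the function that saved the flags always restores them:

/*
 * Sketch only; real local_irq_save()/local_irq_restore() are
 * arch-specific kernel primitives, stubbed out here.
 */
#include <stdbool.h>
#include <stdio.h>

static void local_irq_save(unsigned long *flags) { *flags = 1; }    /* stub */
static void local_irq_restore(unsigned long flags) { (void)flags; } /* stub */

/* Old contract: on success, the callee re-enables IRQs itself. */
static bool try_bypass_old(unsigned long flags)
{
	local_irq_restore(flags); /* implicit re-enable, hidden in the callee */
	return true;
}

/* New contract: the callee leaves the IRQ flags alone. */
static bool try_bypass_new(void)
{
	return true;
}

/* Before: which path re-enables IRQs depends on what the callees did. */
static void call_rcu_before(void)
{
	unsigned long flags;

	local_irq_save(&flags);
	if (try_bypass_old(flags))
		return;           /* flags already restored by the callee */
	/* ... enqueue on the normal list ... */
	local_irq_restore(flags); /* only this path restores here */
}

/* After: save and restore are paired in the same function, on every path. */
static void call_rcu_after(void)
{
	unsigned long flags;

	local_irq_save(&flags);
	if (try_bypass_new()) {
		local_irq_restore(flags);
		return;
	}
	/* ... enqueue on the normal list ... */
	local_irq_restore(flags);
}

int main(void)
{
	call_rcu_before();
	call_rcu_after();
	puts("ok");
	return 0;
}

The payoff is that the IRQ-disabled region now reads as a single save/restore pair within one function, which is what allows the follow-up work to pull the whole nocb enqueue path out into its own function.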
1 parent 1e8e695 commit b913c3f

File tree

2 files changed: +15, -14 lines


kernel/rcu/tree.c

Lines changed: 6 additions & 3 deletions
@@ -2735,8 +2735,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 	}
 
 	check_cb_ovld(rdp);
-	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
+	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
+		local_irq_restore(flags);
 		return; // Enqueued onto ->nocb_bypass, so just leave.
+	}
 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
 	rcu_segcblist_enqueue(&rdp->cblist, head);
 	if (__is_kvfree_rcu_offset((unsigned long)func))
@@ -2754,8 +2756,8 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
 	} else {
 		__call_rcu_core(rdp, head, flags);
-		local_irq_restore(flags);
 	}
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_RCU_LAZY
@@ -4646,8 +4648,9 @@ void rcutree_migrate_callbacks(int cpu)
 		__call_rcu_nocb_wake(my_rdp, true, flags);
 	} else {
 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
-		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
+		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
 	}
+	local_irq_restore(flags);
 	if (needwake)
 		rcu_gp_kthread_wake();
 	lockdep_assert_irqs_enabled();

kernel/rcu/tree_nocb.h

Lines changed: 9 additions & 11 deletions
@@ -532,9 +532,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	// 2. Both of these conditions are met:
 	//    a. The bypass list previously had only lazy CBs, and:
 	//    b. The new CB is non-lazy.
-	if (ncbs && (!bypass_is_lazy || lazy)) {
-		local_irq_restore(flags);
-	} else {
+	if (!ncbs || (bypass_is_lazy && !lazy)) {
 		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
 		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
 		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
@@ -544,7 +542,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 		} else {
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("FirstBQnoWake"));
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 		}
 	}
 	return true; // Callback already enqueued.
@@ -570,7 +568,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 	// If we are being polled or there is no kthread, just leave.
 	t = READ_ONCE(rdp->nocb_gp_kthread);
 	if (rcu_nocb_poll || !t) {
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		rcu_nocb_unlock(rdp);
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 				    TPS("WakeNotPoll"));
 		return;
@@ -583,17 +581,17 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 		rdp->qlen_last_fqs_check = len;
 		// Only lazy CBs in bypass list
 		if (lazy_len && bypass_len == lazy_len) {
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
 					   TPS("WakeLazy"));
 		} else if (!irqs_disabled_flags(flags)) {
 			/* ... if queue was empty ... */
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			wake_nocb_gp(rdp, false);
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("WakeEmpty"));
 		} else {
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
 					   TPS("WakeEmptyIsDeferred"));
 		}
@@ -611,15 +609,15 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 		if ((rdp->nocb_cb_sleep ||
 		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
 		    !timer_pending(&rdp->nocb_timer)) {
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
 					   TPS("WakeOvfIsDeferred"));
 		} else {
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
 		}
 	} else {
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		rcu_nocb_unlock(rdp);
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
 	}
 }
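One non-mechanical part of the first tree_nocb.h hunk is the inverted condition: with the local_irq_restore() branch deleted, the old test "ncbs && (!bypass_is_lazy || lazy)" is replaced by its De Morgan negation so that only the former else-branch survives. A throwaway check (plain C, not kernel code; ncbs is reduced to its truthiness, since the real variable is a count) confirming the two conditions are complementary over all inputs:

/* Exhaustively verify: !(ncbs && (!bypass_is_lazy || lazy))
 * is equivalent to the new test !ncbs || (bypass_is_lazy && !lazy). */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	for (int ncbs = 0; ncbs <= 1; ncbs++)
		for (int bypass_is_lazy = 0; bypass_is_lazy <= 1; bypass_is_lazy++)
			for (int lazy = 0; lazy <= 1; lazy++) {
				bool old_else = !(ncbs && (!bypass_is_lazy || lazy));
				bool new_if = !ncbs || (bypass_is_lazy && !lazy);
				assert(old_else == new_if);
			}
	puts("conditions are equivalent");
	return 0;
}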
