
Commit a6f88ac

Zhiguo Niu authored and fbq committed
lockdep: fix deadlock issue between lockdep and rcu
There is a deadlock scenario between lockdep and RCU when the rcu nocb feature is enabled, as shown in the following call stacks:

rcuop/x
-000|queued_spin_lock_slowpath(lock = 0xFFFFFF817F2A8A80, val = ?)
-001|queued_spin_lock(inline) // try to hold nocb_gp_lock
-001|do_raw_spin_lock(lock = 0xFFFFFF817F2A8A80)
-002|__raw_spin_lock_irqsave(inline)
-002|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F2A8A80)
-003|wake_nocb_gp_defer(inline)
-003|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F30B680)
-004|__call_rcu_common(inline)
-004|call_rcu(head = 0xFFFFFFC082EECC28, func = ?)
-005|call_rcu_zapped(inline)
-005|free_zapped_rcu(ch = ?) // hold graph lock
-006|rcu_do_batch(rdp = 0xFFFFFF817F245680)
-007|nocb_cb_wait(inline)
-007|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F245680)
-008|kthread(_create = 0xFFFFFF80803122C0)
-009|ret_from_fork(asm)

rcuop/y
-000|queued_spin_lock_slowpath(lock = 0xFFFFFFC08291BBC8, val = 0)
-001|queued_spin_lock()
-001|lockdep_lock()
-001|graph_lock() // try to hold graph lock
-002|lookup_chain_cache_add()
-002|validate_chain()
-003|lock_acquire
-004|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F211D80)
-005|lock_timer_base(inline)
-006|mod_timer(inline)
-006|wake_nocb_gp_defer(inline) // hold nocb_gp_lock
-006|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F2A8680)
-007|__call_rcu_common(inline)
-007|call_rcu(head = 0xFFFFFFC0822E0B58, func = ?)
-008|call_rcu_hurry(inline)
-008|rcu_sync_call(inline)
-008|rcu_sync_func(rhp = 0xFFFFFFC0822E0B58)
-009|rcu_do_batch(rdp = 0xFFFFFF817F266680)
-010|nocb_cb_wait(inline)
-010|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F266680)
-011|kthread(_create = 0xFFFFFF8080363740)
-012|ret_from_fork(asm)

rcuop/x and rcuop/y are rcu nocb threads that share the same nocb gp thread. This patch releases the graph lock before lockdep calls call_rcu().

Fixes: a0b0fd5 ("locking/lockdep: Free lock classes that are no longer in use")
Cc: stable@vger.kernel.org
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Reviewed-by: Carlos Llamas <cmllamas@google.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Link: https://lore.kernel.org/r/20240620225436.3127927-1-cmllamas@google.com
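The change follows a common "prepare under the lock, act after dropping it" idiom: the bookkeeping that needs the graph lock stays inside the critical section and only reports whether a callback is needed, while call_rcu() itself runs after the lock is released, so the callback-queueing path can no longer close a cycle with nocb_gp_lock. Below is a minimal user-space sketch of that idiom, with a pthread mutex standing in for the lockdep graph lock and illustrative names (prepare_callback, queue_callback, flush_pending); it is not the kernel code itself.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;
static bool have_pending_work;
static bool callback_scheduled;

/* Stand-in for call_rcu(): in the real code this path may take other
 * locks, which is exactly why it must not run under graph_lock. */
static void queue_callback(void)
{
	printf("callback queued\n");
}

/* Decide, with graph_lock held, whether a callback must be queued.
 * Returns true only when there is pending work and no callback is
 * already in flight; the caller then queues it after unlocking. */
static bool prepare_callback(void)
{
	if (!have_pending_work || callback_scheduled)
		return false;
	callback_scheduled = true;
	return true;
}

static void flush_pending(void)
{
	bool need_callback;

	pthread_mutex_lock(&graph_lock);
	have_pending_work = true;          /* pretend something was zapped */
	need_callback = prepare_callback();
	pthread_mutex_unlock(&graph_lock);

	/* Queue only after graph_lock is released, breaking the
	 * lock dependency shown in the call stacks above. */
	if (need_callback)
		queue_callback();
}

int main(void)
{
	flush_pending();
	return 0;
}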
1 parent 13c267f commit a6f88ac

File tree

1 file changed (+32 / -16 lines)


kernel/locking/lockdep.c

Lines changed: 32 additions & 16 deletions
@@ -6186,25 +6186,27 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback, must called with
+ * the lockdep lock held, returns false if either we don't have
+ * any pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
 {
 	WARN_ON_ONCE(inside_selftest());
 
 	if (list_empty(&pf->zapped))
-		return;
+		return false;
 
 	if (delayed_free.scheduled)
-		return;
+		return false;
 
 	delayed_free.scheduled = true;
 
 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
 	delayed_free.index ^= 1;
 
-	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+	return true;
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
@@ -6230,6 +6232,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
@@ -6241,14 +6244,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
 	__free_zapped_classes(pf);
 	delayed_free.scheduled = false;
+	need_callback =
+		prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	lockdep_unlock();
+	raw_local_irq_restore(flags);
 
 	/*
-	 * If there's anything on the open list, close and start a new callback.
-	 */
-	call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	 * If there's pending free and its callback has not been scheduled,
+	 * queue an RCU callback.
+	 */
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-	lockdep_unlock();
-	raw_local_irq_restore(flags);
 }
 
 /*
@@ -6288,17 +6295,19 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
-
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
 	 * before continuing to free the memory they refer to.
@@ -6392,6 +6401,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	struct pending_free *pf;
 	unsigned long flags;
 	int locked;
+	bool need_callback = false;
 
 	raw_local_irq_save(flags);
 	locked = graph_lock();
@@ -6400,11 +6410,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
 	pf = get_pending_free();
 	__lockdep_reset_lock(pf, lock);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 
 	graph_unlock();
 out_irq:
 	raw_local_irq_restore(flags);
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -6448,6 +6460,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	struct pending_free *pf;
 	unsigned long flags;
 	bool found = false;
+	bool need_callback = false;
 
 	might_sleep();
 
@@ -6468,11 +6481,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (found) {
 		pf = get_pending_free();
 		__lockdep_free_key_range(pf, key, 1);
-		call_rcu_zapped(pf);
+		need_callback = prepare_call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
