@@ -6186,25 +6186,27 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback; must be called with
+ * the lockdep lock held. Returns false if either we don't have
+ * any pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
 {
         WARN_ON_ONCE(inside_selftest());
 
         if (list_empty(&pf->zapped))
-                return;
+                return false;
 
         if (delayed_free.scheduled)
-                return;
+                return false;
 
         delayed_free.scheduled = true;
 
         WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
         delayed_free.index ^= 1;
 
-        call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+        return true;
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
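The rename changes the helper's contract: it no longer queues the RCU callback itself, but only records, under the lockdep lock, that one is needed; every caller must now issue the call_rcu() after dropping the lock. The shape of the change suggests a lock-ordering motive: call_rcu() can acquire RCU-internal locks that lockdep itself tracks, so calling it while holding lockdep's raw graph lock risks deadlock. Below is a minimal, compilable userspace sketch of the same prepare-then-commit pattern, assuming POSIX threads; all names (demo_lock, pending, prepare_flush, queue_flush) are hypothetical and only mirror the structure of the kernel code:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool scheduled;  /* mirrors delayed_free.scheduled */
    static int pending;     /* stands in for the pf->zapped list */

    /*
     * Decide under the lock whether a deferred flush is needed.
     * Like prepare_call_rcu_zapped(), this only flips bookkeeping;
     * the side effect happens after the lock is dropped.
     */
    static bool prepare_flush(void)
    {
            if (pending == 0)
                    return false;   /* nothing to free */
            if (scheduled)
                    return false;   /* a flush is already queued */
            scheduled = true;
            return true;
    }

    /* Stands in for call_rcu(): must not run under demo_lock. */
    static void queue_flush(void)
    {
            printf("flush queued for %d item(s)\n", pending);
    }

    int main(void)
    {
            bool need_flush;

            pthread_mutex_lock(&demo_lock);
            pending = 3;
            need_flush = prepare_flush();   /* decision only */
            pthread_mutex_unlock(&demo_lock);

            if (need_flush)                 /* side effect, lock dropped */
                    queue_flush();
            return 0;
    }

Build with cc -pthread demo.c; the point is only the split between deciding under the lock and acting outside it.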
@@ -6230,6 +6232,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
         struct pending_free *pf;
         unsigned long flags;
+        bool need_callback;
 
         if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
                 return;
@@ -6241,14 +6244,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
         pf = delayed_free.pf + (delayed_free.index ^ 1);
         __free_zapped_classes(pf);
         delayed_free.scheduled = false;
+        need_callback =
+                prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+        lockdep_unlock();
+        raw_local_irq_restore(flags);
 
         /*
-         * If there's anything on the open list, close and start a new callback.
-         */
-        call_rcu_zapped(delayed_free.pf + delayed_free.index);
+         * If there's a pending free and its callback has not been scheduled,
+         * queue an RCU callback.
+         */
+        if (need_callback)
+                call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-        lockdep_unlock();
-        raw_local_irq_restore(flags);
 }
 
 /*
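Note the ordering free_zapped_rcu() now uses: the next callback is prepared while the lockdep lock is still held, then lockdep_unlock() and raw_local_irq_restore() run, and only then is call_rcu() issued. This cannot double-queue a callback: prepare_call_rcu_zapped() sets delayed_free.scheduled under the lock, so any concurrent caller that races into the window between the unlock and the call_rcu() sees scheduled == true and gets false back.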
@@ -6288,17 +6295,19 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
         struct pending_free *pf;
         unsigned long flags;
+        bool need_callback;
 
         init_data_structures_once();
 
         raw_local_irq_save(flags);
         lockdep_lock();
         pf = get_pending_free();
         __lockdep_free_key_range(pf, start, size);
-        call_rcu_zapped(pf);
+        need_callback = prepare_call_rcu_zapped(pf);
         lockdep_unlock();
         raw_local_irq_restore(flags);
-
+        if (need_callback)
+                call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
         /*
          * Wait for any possible iterators from look_up_lock_class() to pass
          * before continuing to free the memory they refer to.
@@ -6392,6 +6401,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
         struct pending_free *pf;
         unsigned long flags;
         int locked;
+        bool need_callback = false;
 
         raw_local_irq_save(flags);
         locked = graph_lock();
@@ -6400,11 +6410,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
         pf = get_pending_free();
         __lockdep_reset_lock(pf, lock);
-        call_rcu_zapped(pf);
+        need_callback = prepare_call_rcu_zapped(pf);
 
         graph_unlock();
 out_irq:
         raw_local_irq_restore(flags);
+        if (need_callback)
+                call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -6448,6 +6460,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
         struct pending_free *pf;
         unsigned long flags;
         bool found = false;
+        bool need_callback = false;
 
         might_sleep();
 
@@ -6468,11 +6481,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
         if (found) {
                 pf = get_pending_free();
                 __lockdep_free_key_range(pf, key, 1);
-                call_rcu_zapped(pf);
+                need_callback = prepare_call_rcu_zapped(pf);
         }
         lockdep_unlock();
         raw_local_irq_restore(flags);
 
+        if (need_callback)
+                call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
         /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
         synchronize_rcu();
 }
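All four call sites of the old call_rcu_zapped() are converted to the same three-step sequence; schematically (not literal kernel code, the zap step varies per caller):

    raw_local_irq_save(flags);
    lockdep_lock();
    /* ... zap classes into the pending_free ... */
    need_callback = prepare_call_rcu_zapped(pf);
    lockdep_unlock();
    raw_local_irq_restore(flags);
    if (need_callback)
            call_rcu(&delayed_free.rcu_head, free_zapped_rcu);

lockdep_reset_lock_reg() and lockdep_unregister_key() initialize need_callback to false because each has a path (the out_irq error exit and the !found case, respectively) that reaches the final check without ever calling prepare_call_rcu_zapped().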