@@ -284,7 +284,7 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

-static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
+static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */

 /*
  * Allocate an element from the rcu_tortures pool.
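
The bool above becomes an atomic counter because nodelay mode is now entered and left independently by each forward-progress test phase (see the atomic_inc()/atomic_dec() pairs later in this diff); with a plain flag, the first phase to finish would switch long delays back on under any phase still running. A minimal userspace analogue of the pattern, using C11 atomics instead of the kernel's atomic_t API (the names below are illustrative, not from rcutorture):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int fwd_nodelay;  /* zero: normal delays; nonzero: keep delays short */

/* Each forward-progress phase brackets its work with inc/dec. */
static void fwd_phase_begin(void) { atomic_fetch_add(&fwd_nodelay, 1); }
static void fwd_phase_end(void)   { atomic_fetch_sub(&fwd_nodelay, 1); }

/* Readers only care whether *any* phase is currently active. */
static bool long_delays_allowed(void)
{
        return atomic_load(&fwd_nodelay) == 0;
}
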
@@ -387,7 +387,7 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
          * period, and we want a long delay occasionally to trigger
          * force_quiescent_state. */

-        if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
+        if (!atomic_read(&rcu_fwd_cb_nodelay) &&
             !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
                 started = cur_ops->get_gp_seq();
                 ts = rcu_trace_clock_local();
@@ -674,6 +674,7 @@ static struct rcu_torture_ops srcu_ops = {
         .call = srcu_torture_call,
         .cb_barrier = srcu_torture_barrier,
         .stats = srcu_torture_stats,
+        .cbflood_max = 50000,
         .irq_capable = 1,
         .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
         .name = "srcu"
@@ -708,6 +709,7 @@ static struct rcu_torture_ops srcud_ops = {
         .call = srcu_torture_call,
         .cb_barrier = srcu_torture_barrier,
         .stats = srcu_torture_stats,
+        .cbflood_max = 50000,
         .irq_capable = 1,
         .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
         .name = "srcud"
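
Only the two SRCU flavors gain a .cbflood_max here, presumably to bound how many callbacks the callback-flood forward-progress test may queue for a flavor that drains callbacks more slowly than vanilla RCU. A hedged sketch of how such a per-flavor cap could gate a flood loop follows; the field and its value 50000 come from this diff, while the loop, its parameters, and the "zero means unlimited" convention are assumptions for illustration only:

/* Illustrative only: queue callbacks until the time budget runs out or the
 * flavor's cap (taken here to mean "0 == no cap") is reached. */
static long flood_callbacks(long cbflood_max,
                            long (*time_budget_remaining)(void),
                            void (*queue_one_cb)(void))
{
        long n_cbs = 0;

        while (time_budget_remaining() > 0) {
                if (cbflood_max && n_cbs >= cbflood_max)
                        break;          /* respect the per-flavor flood cap */
                queue_one_cb();
                n_cbs++;
        }
        return n_cbs;
}
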
@@ -997,7 +999,7 @@ static int rcu_torture_boost(void *arg)
                         goto checkwait;

                 /* Wait for the next test interval. */
-                oldstarttime = boost_starttime;
+                oldstarttime = READ_ONCE(boost_starttime);
                 while (time_before(jiffies, oldstarttime)) {
                         schedule_timeout_interruptible(oldstarttime - jiffies);
                         if (stutter_wait("rcu_torture_boost"))
@@ -1041,10 +1043,11 @@ static int rcu_torture_boost(void *arg)
                  * interval. Besides, we are running at RT priority,
                  * so delays should be relatively rare.
                  */
-                while (oldstarttime == boost_starttime && !kthread_should_stop()) {
+                while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
                         if (mutex_trylock(&boost_mutex)) {
                                 if (oldstarttime == boost_starttime) {
-                                        boost_starttime = jiffies + test_boost_interval * HZ;
+                                        WRITE_ONCE(boost_starttime,
+                                                   jiffies + test_boost_interval * HZ);
                                         n_rcu_torture_boosts++;
                                 }
                                 mutex_unlock(&boost_mutex);
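
The newly marked accesses matter only for the reads and writes of boost_starttime that happen outside boost_mutex (the re-check under the mutex can stay plain). For readers unfamiliar with the markers, a rough userspace stand-in (GNU C) is just a forced volatile access; the kernel's READ_ONCE()/WRITE_ONCE() add type checking and other safeguards on top of this idea:

/* Rough stand-ins for READ_ONCE()/WRITE_ONCE(): go through a volatile-qualified
 * lvalue so the compiler cannot refetch, cache, or tear an access to a variable
 * that is concurrently updated outside the lock. */
#define MY_READ_ONCE(x)       (*(const volatile __typeof__(x) *)&(x))
#define MY_WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
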
@@ -1276,7 +1279,7 @@ rcu_torture_writer(void *arg)
                 boot_ended = rcu_inkernel_boot_has_ended();
                 stutter_waited = stutter_wait("rcu_torture_writer");
                 if (stutter_waited &&
-                    !READ_ONCE(rcu_fwd_cb_nodelay) &&
+                    !atomic_read(&rcu_fwd_cb_nodelay) &&
                     !cur_ops->slow_gps &&
                     !torture_must_stop() &&
                     boot_ended)
@@ -2180,7 +2183,6 @@ static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
         for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
                 if (rfp->n_launders_hist[i].n_launders > 0)
                         break;
-        mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
         pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
                  __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
         gps_old = rfp->rcu_launder_gp_seq_start;
@@ -2193,7 +2195,6 @@ static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
                 gps_old = gps;
         }
         pr_cont("\n");
-        mutex_unlock(&rcu_fwd_mutex);
 }

 /* Callback function for continuous-flood RCU callbacks. */
@@ -2281,6 +2282,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
         unsigned long stopat;
         static DEFINE_TORTURE_RANDOM(trs);

+        pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
         if (!cur_ops->sync)
                 return; // Cannot do need_resched() forward progress testing without ->sync.
         if (cur_ops->call && cur_ops->cb_barrier) {
@@ -2289,7 +2291,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
         }

         /* Tight loop containing cond_resched(). */
-        WRITE_ONCE(rcu_fwd_cb_nodelay, true);
+        atomic_inc(&rcu_fwd_cb_nodelay);
         cur_ops->sync(); /* Later readers see above write. */
         if (selfpropcb) {
                 WRITE_ONCE(fcs.stop, 0);
@@ -2325,6 +2327,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
         if (selfpropcb) {
                 WRITE_ONCE(fcs.stop, 1);
                 cur_ops->sync(); /* Wait for running CB to complete. */
+                pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
                 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
         }

@@ -2333,7 +2336,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
                 destroy_rcu_head_on_stack(&fcs.rh);
         }
         schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
-        WRITE_ONCE(rcu_fwd_cb_nodelay, false);
+        atomic_dec(&rcu_fwd_cb_nodelay);
 }

 /* Carry out call_rcu() forward-progress testing. */
@@ -2353,13 +2356,14 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
         unsigned long stopat;
         unsigned long stoppedat;

+        pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
         if (READ_ONCE(rcu_fwd_emergency_stop))
                 return; /* Get out of the way quickly, no GP wait! */
         if (!cur_ops->call)
                 return; /* Can't do call_rcu() fwd prog without ->call. */

         /* Loop continuously posting RCU callbacks. */
-        WRITE_ONCE(rcu_fwd_cb_nodelay, true);
+        atomic_inc(&rcu_fwd_cb_nodelay);
         cur_ops->sync(); /* Later readers see above write. */
         WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
         stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
@@ -2414,6 +2418,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
         n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
         cver = READ_ONCE(rcu_torture_current_version) - cver;
         gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
+        pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
         cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
         (void)rcu_torture_fwd_prog_cbfree(rfp);

@@ -2427,11 +2432,13 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
                          n_launders, n_launders_sa,
                          n_max_gps, n_max_cbs, cver, gps);
                 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
+                mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
                 rcu_torture_fwd_cb_hist(rfp);
+                mutex_unlock(&rcu_fwd_mutex);
         }
         schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
         tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
-        WRITE_ONCE(rcu_fwd_cb_nodelay, false);
+        atomic_dec(&rcu_fwd_cb_nodelay);
 }


@@ -2511,7 +2518,7 @@ static int rcu_torture_fwd_prog(void *args)
                         firsttime = false;
                         WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
                 } else {
-                        while (READ_ONCE(rcu_fwd_seq) == oldseq)
+                        while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
                                 schedule_timeout_interruptible(1);
                         oldseq = READ_ONCE(rcu_fwd_seq);
                 }
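
The added !torture_must_stop() check keeps a follower kthread from sleeping-and-polling here forever if the torture test is torn down while rcu_fwd_seq is no longer being advanced. The shape of the loop, as a self-contained sketch with stand-ins for the torture API (all names below are assumptions):

#include <stdatomic.h>

static atomic_ulong fwd_seq;    /* advanced by the leader to start a new round */
static atomic_bool  must_stop;  /* set when the test is shutting down */

/* Sleep-poll until the leader starts a new round *or* shutdown begins;
 * without the second condition a follower could wait here indefinitely. */
static void wait_for_next_round(unsigned long oldseq, void (*sleep_briefly)(void))
{
        while (atomic_load(&fwd_seq) == oldseq && !atomic_load(&must_stop))
                sleep_briefly();
}
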
@@ -2905,8 +2912,10 @@ rcu_torture_cleanup(void)
         int i;

         if (torture_cleanup_begin()) {
-                if (cur_ops->cb_barrier != NULL)
+                if (cur_ops->cb_barrier != NULL) {
+                        pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
                         cur_ops->cb_barrier();
+                }
                 return;
         }
         if (!cur_ops) {
@@ -2961,8 +2970,10 @@ rcu_torture_cleanup(void)
          * Wait for all RCU callbacks to fire, then do torture-type-specific
          * cleanup operations.
          */
-        if (cur_ops->cb_barrier != NULL)
+        if (cur_ops->cb_barrier != NULL) {
+                pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
                 cur_ops->cb_barrier();
+        }
         if (cur_ops->cleanup != NULL)
                 cur_ops->cleanup();
