@@ -97,6 +97,7 @@ struct scf_statistics {
 static struct scf_statistics *scf_stats_p;
 static struct task_struct *scf_torture_stats_task;
 static DEFINE_PER_CPU(long long, scf_invoked_count);
+static DEFINE_PER_CPU(struct llist_head, scf_free_pool);
 
 // Data for random primitive selection
 #define SCF_PRIM_RESCHED	0
@@ -133,6 +134,7 @@ struct scf_check {
 	bool scfc_wait;
 	bool scfc_rpc;
 	struct completion scfc_completion;
+	struct llist_node scf_node;
 };
 
 // Use to wait for all threads to start.
@@ -148,6 +150,33 @@ static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);
 
 extern void resched_cpu(int cpu); // An alternative IPI vector.
 
+static void scf_add_to_free_list(struct scf_check *scfcp)
+{
+	struct llist_head *pool;
+	unsigned int cpu;
+
+	if (!scfcp)
+		return;
+	cpu = raw_smp_processor_id() % nthreads;
+	pool = &per_cpu(scf_free_pool, cpu);
+	llist_add(&scfcp->scf_node, pool);
+}
+
+static void scf_cleanup_free_list(unsigned int cpu)
+{
+	struct llist_head *pool;
+	struct llist_node *node;
+	struct scf_check *scfcp;
+
+	pool = &per_cpu(scf_free_pool, cpu);
+	node = llist_del_all(pool);
+	while (node) {
+		scfcp = llist_entry(node, struct scf_check, scf_node);
+		node = node->next;
+		kfree(scfcp);
+	}
+}
+
 // Print torture statistics. Caller must ensure serialization.
 static void scf_torture_stats_print(void)
 {
@@ -296,7 +325,7 @@ static void scf_handler(void *scfc_in)
 		if (scfcp->scfc_rpc)
 			complete(&scfcp->scfc_completion);
 	} else {
-		kfree(scfcp);
+		scf_add_to_free_list(scfcp);
 	}
 }
 
@@ -320,10 +349,6 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 	struct scf_check *scfcp = NULL;
 	struct scf_selector *scfsp = scf_sel_rand(trsp);
 
-	if (use_cpus_read_lock)
-		cpus_read_lock();
-	else
-		preempt_disable();
 	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
 		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
 		if (!scfcp) {
@@ -337,6 +362,10 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			scfcp->scfc_rpc = false;
 		}
 	}
+	if (use_cpus_read_lock)
+		cpus_read_lock();
+	else
+		preempt_disable();
 	switch (scfsp->scfs_prim) {
 	case SCF_PRIM_RESCHED:
 		if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
@@ -363,7 +392,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 				scfp->n_single_wait_ofl++;
 			else
 				scfp->n_single_ofl++;
-			kfree(scfcp);
+			scf_add_to_free_list(scfcp);
 			scfcp = NULL;
 		}
 		break;
@@ -391,7 +420,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 				preempt_disable();
 		} else {
 			scfp->n_single_rpc_ofl++;
-			kfree(scfcp);
+			scf_add_to_free_list(scfcp);
 			scfcp = NULL;
 		}
 		break;
@@ -428,7 +457,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
 			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
 		} else {
-			kfree(scfcp);
+			scf_add_to_free_list(scfcp);
 		}
 		barrier(); // Prevent race-reduction compiler optimizations.
 	}
@@ -463,7 +492,7 @@ static int scftorture_invoker(void *arg)
 
 	// Make sure that the CPU is affinitized appropriately during testing.
 	curcpu = raw_smp_processor_id();
-	WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
+	WARN_ONCE(curcpu != cpu,
 		  "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
 		  __func__, scfp->cpu, curcpu, nr_cpu_ids);
 
@@ -479,6 +508,8 @@ static int scftorture_invoker(void *arg)
 	VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);
 
 	do {
+		scf_cleanup_free_list(cpu);
+
 		scftorture_invoke_one(scfp, &rand);
 		while (cpu_is_offline(cpu) && !torture_must_stop()) {
 			schedule_timeout_interruptible(HZ / 5);
@@ -523,12 +554,15 @@ static void scf_torture_cleanup(void)
 			torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
 	else
 		goto end;
-	smp_call_function(scf_cleanup_handler, NULL, 0);
+	smp_call_function(scf_cleanup_handler, NULL, 1);
 	torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
 	scf_torture_stats_print();  // -After- the stats thread is stopped!
 	kfree(scf_stats_p);  // -After- the last stats print has completed!
 	scf_stats_p = NULL;
 
+	for (i = 0; i < nr_cpu_ids; i++)
+		scf_cleanup_free_list(i);
+
 	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
 		scftorture_print_module_parms("End of test: FAILURE");
 	else if (torture_onoff_failures())