@@ -376,8 +376,6 @@ struct workqueue_struct {
 	struct wq_node_nr_active *node_nr_active[];	/* I: per-node nr_active */
 };
 
-static struct kmem_cache *pwq_cache;
-
 /*
  * Each pod type describes how CPUs should be grouped for unbound workqueues.
  * See the comment above workqueue_attrs->affn_scope.
@@ -389,20 +387,15 @@ struct wq_pod_type {
 	int *cpu_pod;				/* cpu -> pod */
 };
 
-static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
-static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
-
 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
-	[WQ_AFFN_DFL]			= "default",
-	[WQ_AFFN_CPU]			= "cpu",
-	[WQ_AFFN_SMT]			= "smt",
-	[WQ_AFFN_CACHE]			= "cache",
-	[WQ_AFFN_NUMA]			= "numa",
-	[WQ_AFFN_SYSTEM]		= "system",
+	[WQ_AFFN_DFL]		= "default",
+	[WQ_AFFN_CPU]		= "cpu",
+	[WQ_AFFN_SMT]		= "smt",
+	[WQ_AFFN_CACHE]		= "cache",
+	[WQ_AFFN_NUMA]		= "numa",
+	[WQ_AFFN_SYSTEM]	= "system",
 };
 
-static bool wq_topo_initialized __read_mostly = false;
-
 /*
  * Per-cpu work items which run for longer than the following threshold are
  * automatically considered CPU intensive and excluded from concurrency
@@ -418,6 +411,12 @@ static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
 static bool wq_online;			/* can kworkers be created yet? */
+static bool wq_topo_initialized __read_mostly = false;
+
+static struct kmem_cache *pwq_cache;
+
+static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
+static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
 
 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
 static struct workqueue_attrs *wq_update_pod_attrs_buf;
@@ -2231,7 +2230,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 */
 	lockdep_assert_irqs_disabled();
 
-
 	/*
 	 * For a draining wq, only works from the same workqueue are
 	 * allowed. The __WQ_DESTROYING helps to spot the issue that
@@ -4121,8 +4119,8 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	local_irq_restore(flags);
 
 	/*
-	 * This allows canceling during early boot. We know that @work
-	 * isn't executing.
+	 * Skip __flush_work() during early boot when we know that @work isn't
+	 * executing. This allows canceling during early boot.
 	 */
 	if (wq_online)
 		__flush_work(work, true);