
Commit c5f8cd6

workqueue: Avoid premature init of wq->node_nr_active[].max
System workqueues are allocated early during boot from
workqueue_init_early(). While allocating unbound workqueues,
wq_update_node_max_active() is invoked from apply_workqueue_attrs() and
accesses NUMA topology to initialize wq->node_nr_active[].max. However,
topology information may not be set up at this point.
wq_update_node_max_active() is explicitly invoked from
workqueue_init_topology() later when topology information is known to be
available. This doesn't seem to crash anything but it's doing useless work
with dubious data. Let's skip the premature and duplicate node_max_active
updates by initializing the field to WQ_DFL_MIN_ACTIVE on allocation and
making wq_update_node_max_active() a noop until workqueue_init_topology().

Signed-off-by: Tejun Heo <tj@kernel.org>
1 parent 15930da commit c5f8cd6
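
For readers outside the kernel tree, a minimal user-space C sketch of the pattern this patch applies may help: give the field a safe default at allocation time, and make the update function a noop behind a guard flag until topology initialization has run. All identifiers below (topo_initialized, DFL_MIN_ACTIVE, node_nr_active, the fake topo_cpus_per_node probe) are illustrative stand-ins, not the kernel's API.

#include <stdbool.h>
#include <stdio.h>

#define DFL_MIN_ACTIVE 8                /* stand-in for WQ_DFL_MIN_ACTIVE */

static bool topo_initialized = false;   /* stand-in for wq_topo_initialized */
static int topo_cpus_per_node;          /* known only after topology init */

struct node_nr_active {
        int max;                        /* stand-in for wq->node_nr_active[].max */
};

static void init_node_nr_active(struct node_nr_active *nna)
{
        /* safe default at allocation, before topology is known */
        nna->max = DFL_MIN_ACTIVE;
}

static void update_node_max_active(struct node_nr_active *nna)
{
        /* noop until topology init; early calls would compute from dubious data */
        if (!topo_initialized)
                return;
        nna->max = topo_cpus_per_node;
}

static void init_topology(void)
{
        topo_cpus_per_node = 4;         /* pretend we probed the hardware here */
        topo_initialized = true;
}

int main(void)
{
        struct node_nr_active nna;

        init_node_nr_active(&nna);
        update_node_max_active(&nna);   /* early boot: guard keeps the default */
        printf("before topology init: max=%d\n", nna.max);

        init_topology();
        update_node_max_active(&nna);   /* re-invoked once topology is known */
        printf("after topology init:  max=%d\n", nna.max);
        return 0;
}

The sketch ignores locking; in the kernel, wq_update_node_max_active() runs with wq->mutex held (see the lockdep assert in the diff below), so the plain bool flag needs no extra synchronization modeled here.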

File tree

1 file changed: +8 -0 lines changed

kernel/workqueue.c

Lines changed: 8 additions & 0 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9221a4c..a65081ec6780 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -386,6 +386,8 @@ static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
 	[WQ_AFFN_SYSTEM]	= "system",
 };
 
+static bool wq_topo_initialized __read_mostly = false;
+
 /*
  * Per-cpu work items which run for longer than the following threshold are
  * automatically considered CPU intensive and excluded from concurrency
@@ -1510,6 +1512,9 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
 
 	lockdep_assert_held(&wq->mutex);
 
+	if (!wq_topo_initialized)
+		return;
+
 	if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
 		off_cpu = -1;
 
@@ -4356,6 +4361,7 @@ static void free_node_nr_active(struct wq_node_nr_active **nna_ar)
 
 static void init_node_nr_active(struct wq_node_nr_active *nna)
 {
+	nna->max = WQ_DFL_MIN_ACTIVE;
 	atomic_set(&nna->nr, 0);
 	raw_spin_lock_init(&nna->lock);
 	INIT_LIST_HEAD(&nna->pending_pwqs);
@@ -7400,6 +7406,8 @@ void __init workqueue_init_topology(void)
 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
 
+	wq_topo_initialized = true;
+
 	mutex_lock(&wq_pool_mutex);
 
 	/*
