Commit 2f34d73
workqueue: Fix queue_work_on() with BH workqueues
When queue_work_on() is used to queue a BH work item on a remote CPU, the work item is queued on that CPU but kick_pool() raises softirq on the local CPU. This leads to stalls as the work item won't be executed until something else on the remote CPU schedules a BH work item or tasklet locally.

Fix it by bouncing the softirq raise to the target CPU using per-cpu irq_work.

Signed-off-by: Tejun Heo <tj@kernel.org>
Fixes: 4cb1ef6 ("workqueue: Implement BH workqueues to eventually replace tasklets")
1 parent bf52b1a commit 2f34d73
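For context, the path being fixed is hit by any caller that queues BH work on a specific remote CPU. A minimal sketch of such a caller follows; the function and work-item names are hypothetical, and it assumes system_bh_wq from the BH workqueue series named in the Fixes tag:

    #include <linux/workqueue.h>

    /* executes in softirq (BH) context on the CPU the item was queued on */
    static void my_bh_fn(struct work_struct *work)
    {
    	/* ... */
    }

    static DECLARE_WORK(my_bh_work, my_bh_fn);

    /*
     * Hypothetical caller: queue BH work on a specific CPU. Before this
     * fix, kick_pool() raised the softirq on the *queueing* CPU, so if
     * @cpu was remote the item sat unexecuted until that CPU raised a BH
     * softirq for some unrelated reason.
     */
    static void kick_remote_bh(int cpu)
    {
    	queue_work_on(cpu, system_bh_wq, &my_bh_work);
    }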

1 file changed, +36 -5 lines
kernel/workqueue.c

Lines changed: 36 additions & 5 deletions

@@ -54,6 +54,7 @@
 #include <linux/nmi.h>
 #include <linux/kvm_para.h>
 #include <linux/delay.h>
+#include <linux/irq_work.h>
 
 #include "workqueue_internal.h"
 
@@ -457,6 +458,10 @@ static bool wq_debug_force_rr_cpu = false;
 #endif
 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
+/* to raise softirq for the BH worker pools on other CPUs */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
+				     bh_pool_irq_works);
+
 /* the BH worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
 				     bh_worker_pools);
@@ -1197,6 +1202,13 @@ static bool assign_work(struct work_struct *work, struct worker *worker,
 	return true;
 }
 
+static struct irq_work *bh_pool_irq_work(struct worker_pool *pool)
+{
+	int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0;
+
+	return &per_cpu(bh_pool_irq_works, pool->cpu)[high];
+}
+
 /**
  * kick_pool - wake up an idle worker if necessary
  * @pool: pool to kick
@@ -1215,10 +1227,15 @@ static bool kick_pool(struct worker_pool *pool)
 		return false;
 
 	if (pool->flags & POOL_BH) {
-		if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
-			raise_softirq_irqoff(HI_SOFTIRQ);
-		else
-			raise_softirq_irqoff(TASKLET_SOFTIRQ);
+		if (likely(pool->cpu == smp_processor_id())) {
+			if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
+				raise_softirq_irqoff(HI_SOFTIRQ);
+			else
+				raise_softirq_irqoff(TASKLET_SOFTIRQ);
+		} else {
+			irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu);
+		}
+
 		return true;
 	}
 
@@ -7367,6 +7384,16 @@ static inline void wq_watchdog_init(void) { }
 
 #endif	/* CONFIG_WQ_WATCHDOG */
 
+static void bh_pool_kick_normal(struct irq_work *irq_work)
+{
+	raise_softirq_irqoff(TASKLET_SOFTIRQ);
+}
+
+static void bh_pool_kick_highpri(struct irq_work *irq_work)
+{
+	raise_softirq_irqoff(HI_SOFTIRQ);
+}
+
 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
 {
 	if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
@@ -7408,6 +7435,8 @@ void __init workqueue_init_early(void)
 {
 	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
+	void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal,
+						       bh_pool_kick_highpri };
 	int i, cpu;
 
 	BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
@@ -7455,8 +7484,10 @@ void __init workqueue_init_early(void)
 
 	i = 0;
 	for_each_bh_worker_pool(pool, cpu) {
-		init_cpu_worker_pool(pool, cpu, std_nice[i++]);
+		init_cpu_worker_pool(pool, cpu, std_nice[i]);
 		pool->flags |= POOL_BH;
+		init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]);
+		i++;
 	}
 
 	i = 0;
