@@ -54,6 +54,7 @@
 #include <linux/nmi.h>
 #include <linux/kvm_para.h>
 #include <linux/delay.h>
+#include <linux/irq_work.h>
 
 #include "workqueue_internal.h"
 
@@ -457,6 +458,10 @@ static bool wq_debug_force_rr_cpu = false;
 #endif
 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
+/* to raise softirq for the BH worker pools on other CPUs */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
+				     bh_pool_irq_works);
+
 /* the BH worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
				      bh_worker_pools);
@@ -1197,6 +1202,13 @@ static bool assign_work(struct work_struct *work, struct worker *worker,
 	return true;
 }
 
+static struct irq_work *bh_pool_irq_work(struct worker_pool *pool)
+{
+	int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0;
+
+	return &per_cpu(bh_pool_irq_works, pool->cpu)[high];
+}
+
 /**
  * kick_pool - wake up an idle worker if necessary
  * @pool: pool to kick
@@ -1215,10 +1227,15 @@ static bool kick_pool(struct worker_pool *pool)
 		return false;
 
 	if (pool->flags & POOL_BH) {
-		if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
-			raise_softirq_irqoff(HI_SOFTIRQ);
-		else
-			raise_softirq_irqoff(TASKLET_SOFTIRQ);
+		if (likely(pool->cpu == smp_processor_id())) {
+			if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
+				raise_softirq_irqoff(HI_SOFTIRQ);
+			else
+				raise_softirq_irqoff(TASKLET_SOFTIRQ);
+		} else {
+			irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu);
+		}
+
 		return true;
 	}
 
@@ -7367,6 +7384,16 @@ static inline void wq_watchdog_init(void) { }
 
 #endif	/* CONFIG_WQ_WATCHDOG */
 
+static void bh_pool_kick_normal(struct irq_work *irq_work)
+{
+	raise_softirq_irqoff(TASKLET_SOFTIRQ);
+}
+
+static void bh_pool_kick_highpri(struct irq_work *irq_work)
+{
+	raise_softirq_irqoff(HI_SOFTIRQ);
+}
+
 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
 {
 	if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
@@ -7408,6 +7435,8 @@ void __init workqueue_init_early(void)
 {
 	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
+	void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal,
+						       bh_pool_kick_highpri };
 	int i, cpu;
 
 	BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
@@ -7455,8 +7484,10 @@ void __init workqueue_init_early(void)
 
 		i = 0;
 		for_each_bh_worker_pool(pool, cpu) {
-			init_cpu_worker_pool(pool, cpu, std_nice[i++]);
+			init_cpu_worker_pool(pool, cpu, std_nice[i]);
 			pool->flags |= POOL_BH;
+			init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]);
+			i++;
 		}
 
 		i = 0;
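
For readers unfamiliar with the mechanism this patch leans on: an irq_work item carries a callback that can be queued on a remote CPU with irq_work_queue_on(), and the callback then runs on that CPU from its IRQ-work interrupt, a context where raise_softirq_irqoff() is legal. Below is a minimal sketch of that same init_irq_work()/irq_work_queue_on() pattern, written as a hypothetical out-of-tree module; it is not part of this commit, and all demo_* names are illustrative.

/*
 * Hypothetical out-of-tree module sketch (not part of this commit).
 * It mirrors the pattern above: initialize an irq_work with a callback,
 * then queue it on a remote CPU so the callback runs there.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/printk.h>

static void demo_kick(struct irq_work *work)
{
	/* Runs on the target CPU in IRQ context, like bh_pool_kick_normal(). */
	pr_info("irq_work callback ran on CPU%d\n", smp_processor_id());
}

static struct irq_work demo_work = IRQ_WORK_INIT(demo_kick);

static int __init demo_init(void)
{
	/* Pick some online CPU other than the current one, if any. */
	int cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);

	/* Counterpart of the remote branch added to kick_pool() above. */
	irq_work_queue_on(&demo_work, cpu);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Wait for any pending callback before the module text goes away. */
	irq_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The commit keeps one irq_work per BH pool per CPU (bh_pool_irq_works) rather than a single item, since an irq_work can only be pending on one CPU at a time and the normal and highpri pools must raise different softirqs.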