@@ -2029,7 +2029,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
  * try_to_grab_pending - steal work item from worklist and disable irq
  * @work: work item to steal
  * @is_dwork: @work is a delayed_work
- * @flags: place to store irq state
+ * @irq_flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
  * stable state - idle, on timer or on worklist.
@@ -2051,17 +2051,17 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
  *
  * On successful return, >= 0, irq is disabled and the caller is
- * responsible for releasing it using local_irq_restore(*@flags).
+ * responsible for releasing it using local_irq_restore(*@irq_flags).
  *
  * This function is safe to call from any context including IRQ handler.
  */
 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
-                               unsigned long *flags)
+                               unsigned long *irq_flags)
 {
         struct worker_pool *pool;
         struct pool_workqueue *pwq;
 
-        local_irq_save(*flags);
+        local_irq_save(*irq_flags);
 
         /* try to steal the timer if it exists */
         if (is_dwork) {
@@ -2136,7 +2136,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
         raw_spin_unlock(&pool->lock);
 fail:
         rcu_read_unlock();
-        local_irq_restore(*flags);
+        local_irq_restore(*irq_flags);
         if (work_is_canceling(work))
                 return -ENOENT;
         cpu_relax();
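The kernel-doc above spells out the caller contract: retry while try_to_grab_pending() returns -EAGAIN, and on success (>= 0) release the saved irq state with local_irq_restore(*@irq_flags). A minimal sketch of that pattern with the renamed parameter, modeled on mod_delayed_work_on() further down in this diff; the wrapper my_requeue_dwork() and its arguments are hypothetical and not part of this patch:

static bool my_requeue_dwork(struct workqueue_struct *wq,
                             struct delayed_work *dwork, unsigned long delay)
{
        unsigned long irq_flags;
        int ret;

        /* Retry while the item is transiently locked; -EAGAIN is short-lived. */
        do {
                ret = try_to_grab_pending(&dwork->work, true, &irq_flags);
        } while (unlikely(ret == -EAGAIN));

        if (likely(ret >= 0)) {
                /* PENDING is ours and irqs are off; queue, then release irq state. */
                __queue_delayed_work(WORK_CPU_UNBOUND, wq, dwork, delay);
                local_irq_restore(irq_flags);
        }

        /* -ENOENT (item being canceled) converts to true, as in mod_delayed_work_on(). */
        return ret;
}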
@@ -2344,16 +2344,16 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
                    struct work_struct *work)
 {
         bool ret = false;
-        unsigned long flags;
+        unsigned long irq_flags;
 
-        local_irq_save(flags);
+        local_irq_save(irq_flags);
 
         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                 __queue_work(cpu, wq, work);
                 ret = true;
         }
 
-        local_irq_restore(flags);
+        local_irq_restore(irq_flags);
         return ret;
 }
 EXPORT_SYMBOL(queue_work_on);
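Because queue_work_on() saves and restores irq state internally (the local variable renamed here to irq_flags) and sets the PENDING bit atomically, it can be called from any context, including hard-irq handlers. A minimal, hypothetical driver-side sketch; my_work_fn, my_work and my_irq_handler are made-up names, not part of this patch:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        pr_info("deferred work running\n");
}
static DECLARE_WORK(my_work, my_work_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        /* queue_work_on() returns false if my_work was already pending. */
        queue_work_on(raw_smp_processor_id(), system_wq, &my_work);
        return IRQ_HANDLED;
}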
@@ -2410,7 +2410,7 @@ static int select_numa_node_cpu(int node)
 bool queue_work_node(int node, struct workqueue_struct *wq,
                      struct work_struct *work)
 {
-        unsigned long flags;
+        unsigned long irq_flags;
         bool ret = false;
 
         /*
@@ -2424,7 +2424,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
          */
         WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
 
-        local_irq_save(flags);
+        local_irq_save(irq_flags);
 
         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                 int cpu = select_numa_node_cpu(node);
@@ -2433,7 +2433,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
                 ret = true;
         }
 
-        local_irq_restore(flags);
+        local_irq_restore(irq_flags);
         return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work_node);
@@ -2503,17 +2503,17 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 {
         struct work_struct *work = &dwork->work;
         bool ret = false;
-        unsigned long flags;
+        unsigned long irq_flags;
 
         /* read the comment in __queue_work() */
-        local_irq_save(flags);
+        local_irq_save(irq_flags);
 
         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                 __queue_delayed_work(cpu, wq, dwork, delay);
                 ret = true;
         }
 
-        local_irq_restore(flags);
+        local_irq_restore(irq_flags);
         return ret;
 }
 EXPORT_SYMBOL(queue_delayed_work_on);
@@ -2539,16 +2539,16 @@ EXPORT_SYMBOL(queue_delayed_work_on);
 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
                          struct delayed_work *dwork, unsigned long delay)
 {
-        unsigned long flags;
+        unsigned long irq_flags;
         int ret;
 
         do {
-                ret = try_to_grab_pending(&dwork->work, true, &flags);
+                ret = try_to_grab_pending(&dwork->work, true, &irq_flags);
         } while (unlikely(ret == -EAGAIN));
 
         if (likely(ret >= 0)) {
                 __queue_delayed_work(cpu, wq, dwork, delay);
-                local_irq_restore(flags);
+                local_irq_restore(irq_flags);
         }
 
         /* -ENOENT from try_to_grab_pending() becomes %true */
@@ -4105,18 +4105,18 @@ EXPORT_SYMBOL(flush_rcu_work);
 
 static bool __cancel_work(struct work_struct *work, bool is_dwork)
 {
-        unsigned long flags;
+        unsigned long irq_flags;
         int ret;
 
         do {
-                ret = try_to_grab_pending(work, is_dwork, &flags);
+                ret = try_to_grab_pending(work, is_dwork, &irq_flags);
         } while (unlikely(ret == -EAGAIN));
 
         if (unlikely(ret < 0))
                 return false;
 
         set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-        local_irq_restore(flags);
+        local_irq_restore(irq_flags);
         return ret;
 }
 
@@ -4137,11 +4137,11 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k
 static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
 {
         static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
-        unsigned long flags;
+        unsigned long irq_flags;
         int ret;
 
         do {
-                ret = try_to_grab_pending(work, is_dwork, &flags);
+                ret = try_to_grab_pending(work, is_dwork, &irq_flags);
                 /*
                  * If someone else is already canceling, wait for it to
                  * finish. flush_work() doesn't work for PREEMPT_NONE
@@ -4175,7 +4175,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
 
         /* tell other tasks trying to grab @work to back off */
         mark_work_canceling(work);
-        local_irq_restore(flags);
+        local_irq_restore(irq_flags);
 
         /*
          * Skip __flush_work() during early boot when we know that @work isn't
@@ -5381,15 +5381,15 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
 
                 activated = false;
                 for_each_pwq(pwq, wq) {
-                        unsigned long flags;
+                        unsigned long irq_flags;
 
                         /* can be called during early boot w/ irq disabled */
-                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+                        raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
                         if (pwq_activate_first_inactive(pwq, true)) {
                                 activated = true;
                                 kick_pool(pwq->pool);
                         }
-                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+                        raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
                 }
         } while (activated);
 }
@@ -5762,7 +5762,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
 unsigned int work_busy(struct work_struct *work)
 {
         struct worker_pool *pool;
-        unsigned long flags;
+        unsigned long irq_flags;
         unsigned int ret = 0;
 
         if (work_pending(work))
@@ -5771,10 +5771,10 @@ unsigned int work_busy(struct work_struct *work)
         rcu_read_lock();
         pool = get_work_pool(work);
         if (pool) {
-                raw_spin_lock_irqsave(&pool->lock, flags);
+                raw_spin_lock_irqsave(&pool->lock, irq_flags);
                 if (find_worker_executing_work(pool, work))
                         ret |= WORK_BUSY_RUNNING;
-                raw_spin_unlock_irqrestore(&pool->lock, flags);
+                raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
         }
         rcu_read_unlock();
 
@@ -6006,7 +6006,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
 {
         struct pool_workqueue *pwq;
         bool idle = true;
-        unsigned long flags;
+        unsigned long irq_flags;
 
         for_each_pwq(pwq, wq) {
                 if (!pwq_is_empty(pwq)) {
@@ -6020,7 +6020,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
         pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
         for_each_pwq(pwq, wq) {
-                raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+                raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
                 if (!pwq_is_empty(pwq)) {
                         /*
                          * Defer printing to avoid deadlocks in console
@@ -6031,7 +6031,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
                         show_pwq(pwq);
                         printk_deferred_exit();
                 }
-                raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+                raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
                 /*
                  * We could be printing a lot from atomic context, e.g.
                  * sysrq-t -> show_all_workqueues(). Avoid triggering
@@ -6050,10 +6050,10 @@ static void show_one_worker_pool(struct worker_pool *pool)
 {
         struct worker *worker;
         bool first = true;
-        unsigned long flags;
+        unsigned long irq_flags;
         unsigned long hung = 0;
 
-        raw_spin_lock_irqsave(&pool->lock, flags);
+        raw_spin_lock_irqsave(&pool->lock, irq_flags);
         if (pool->nr_workers == pool->nr_idle)
                 goto next_pool;
 
@@ -6081,7 +6081,7 @@ static void show_one_worker_pool(struct worker_pool *pool)
         pr_cont("\n");
         printk_deferred_exit();
 next_pool:
-        raw_spin_unlock_irqrestore(&pool->lock, flags);
+        raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
         /*
          * We could be printing a lot from atomic context, e.g.
          * sysrq-t -> show_all_workqueues(). Avoid triggering
@@ -7212,10 +7212,10 @@ static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
 static void show_cpu_pool_hog(struct worker_pool *pool)
 {
         struct worker *worker;
-        unsigned long flags;
+        unsigned long irq_flags;
         int bkt;
 
-        raw_spin_lock_irqsave(&pool->lock, flags);
+        raw_spin_lock_irqsave(&pool->lock, irq_flags);
 
         hash_for_each(pool->busy_hash, bkt, worker, hentry) {
                 if (task_is_running(worker->task)) {
@@ -7233,7 +7233,7 @@ static void show_cpu_pool_hog(struct worker_pool *pool)
                 }
         }
 
-        raw_spin_unlock_irqrestore(&pool->lock, flags);
+        raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
 }
 
 static void show_cpu_pools_hogs(void)