
Commit c26e2f2

workqueue: Use variable name irq_flags for saving local irq flags
Using the generic term `flags` for irq flags is conventional but can be confusing, as there's quite a bit of code dealing with work flags, which involves some subtleties. Let's use a more explicit name, `irq_flags`, for local irq flags. No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
1 parent cdc6e4b commit c26e2f2
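
The confusion the message refers to is concrete in this file: `wq->flags` carries workqueue attribute bits such as WQ_UNBOUND, `work_data_bits(work)` carries per-work-item state bits such as WORK_STRUCT_PENDING_BIT, and `unsigned long flags` locals carry saved irq state. A minimal sketch of how all three meet in one function, modeled on queue_work_node() in the diff below (queue_unbound_work() is a hypothetical name, not part of the patch):

	/* Sketch only: three unrelated kinds of "flags" in one function. */
	static bool queue_unbound_work(struct workqueue_struct *wq,
				       struct work_struct *work)
	{
		unsigned long irq_flags;	/* saved local irq state */
		bool ret = false;

		/* wq->flags: workqueue attribute bits */
		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
			return false;

		local_irq_save(irq_flags);
		/* work_data_bits(): per-item state bits, e.g. PENDING */
		if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
			__queue_work(WORK_CPU_UNBOUND, wq, work);
			ret = true;
		}
		local_irq_restore(irq_flags);
		return ret;
	}

Under the old convention, the attribute bits, the PENDING bit, and the local variable could all plausibly be called "flags"; the rename reserves `irq_flags` for the last one.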

File tree

1 file changed (+38, -38)

kernel/workqueue.c

Lines changed: 38 additions & 38 deletions
@@ -2029,7 +2029,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
  * try_to_grab_pending - steal work item from worklist and disable irq
  * @work: work item to steal
  * @is_dwork: @work is a delayed_work
- * @flags: place to store irq state
+ * @irq_flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
  * stable state - idle, on timer or on worklist.
@@ -2051,17 +2051,17 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
  *
  * On successful return, >= 0, irq is disabled and the caller is
- * responsible for releasing it using local_irq_restore(*@flags).
+ * responsible for releasing it using local_irq_restore(*@irq_flags).
  *
  * This function is safe to call from any context including IRQ handler.
  */
 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
-			       unsigned long *flags)
+			       unsigned long *irq_flags)
 {
 	struct worker_pool *pool;
 	struct pool_workqueue *pwq;
 
-	local_irq_save(*flags);
+	local_irq_save(*irq_flags);
 
 	/* try to steal the timer if it exists */
 	if (is_dwork) {
@@ -2136,7 +2136,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 	raw_spin_unlock(&pool->lock);
 fail:
 	rcu_read_unlock();
-	local_irq_restore(*flags);
+	local_irq_restore(*irq_flags);
 	if (work_is_canceling(work))
 		return -ENOENT;
 	cpu_relax();
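
The comment above states the contract that makes the rename worthwhile: on a successful return (>= 0) irqs are disabled and the saved state lives in *@irq_flags, which the caller must eventually hand back to local_irq_restore(). A condensed sketch of the caller-side pattern, mirroring mod_delayed_work_on() and __cancel_work() later in this diff (grab_and_clear() is a hypothetical name):

	static bool grab_and_clear(struct work_struct *work, bool is_dwork)
	{
		unsigned long irq_flags;
		int ret;

		do {
			/* -EAGAIN: lost a race to grab PENDING; retry */
			ret = try_to_grab_pending(work, is_dwork, &irq_flags);
		} while (unlikely(ret == -EAGAIN));

		if (unlikely(ret < 0))		/* -ENOENT: being canceled */
			return false;

		/* PENDING is ours; irqs stay off until we restore */
		set_work_pool_and_clear_pending(work, get_work_pool_id(work));
		local_irq_restore(irq_flags);
		return ret;
	}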
@@ -2344,16 +2344,16 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
 		   struct work_struct *work)
 {
 	bool ret = false;
-	unsigned long flags;
+	unsigned long irq_flags;
 
-	local_irq_save(flags);
+	local_irq_save(irq_flags);
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 		__queue_work(cpu, wq, work);
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 	return ret;
 }
 EXPORT_SYMBOL(queue_work_on);
@@ -2410,7 +2410,7 @@ static int select_numa_node_cpu(int node)
 bool queue_work_node(int node, struct workqueue_struct *wq,
 		     struct work_struct *work)
 {
-	unsigned long flags;
+	unsigned long irq_flags;
 	bool ret = false;
 
 	/*
@@ -2424,7 +2424,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
 	 */
 	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
 
-	local_irq_save(flags);
+	local_irq_save(irq_flags);
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 		int cpu = select_numa_node_cpu(node);
@@ -2433,7 +2433,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work_node);
@@ -2503,17 +2503,17 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 {
 	struct work_struct *work = &dwork->work;
 	bool ret = false;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	/* read the comment in __queue_work() */
-	local_irq_save(flags);
+	local_irq_save(irq_flags);
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 		__queue_delayed_work(cpu, wq, dwork, delay);
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 	return ret;
 }
 EXPORT_SYMBOL(queue_delayed_work_on);
@@ -2539,16 +2539,16 @@ EXPORT_SYMBOL(queue_delayed_work_on);
 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			 struct delayed_work *dwork, unsigned long delay)
 {
-	unsigned long flags;
+	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(&dwork->work, true, &flags);
+		ret = try_to_grab_pending(&dwork->work, true, &irq_flags);
 	} while (unlikely(ret == -EAGAIN));
 
 	if (likely(ret >= 0)) {
 		__queue_delayed_work(cpu, wq, dwork, delay);
-		local_irq_restore(flags);
+		local_irq_restore(irq_flags);
 	}
 
 	/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -4105,18 +4105,18 @@ EXPORT_SYMBOL(flush_rcu_work);
 
 static bool __cancel_work(struct work_struct *work, bool is_dwork)
 {
-	unsigned long flags;
+	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(work, is_dwork, &flags);
+		ret = try_to_grab_pending(work, is_dwork, &irq_flags);
 	} while (unlikely(ret == -EAGAIN));
 
 	if (unlikely(ret < 0))
 		return false;
 
 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 	return ret;
 }
 
@@ -4137,11 +4137,11 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k
 static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
 {
 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
-	unsigned long flags;
+	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(work, is_dwork, &flags);
+		ret = try_to_grab_pending(work, is_dwork, &irq_flags);
 		/*
 		 * If someone else is already canceling, wait for it to
 		 * finish. flush_work() doesn't work for PREEMPT_NONE
@@ -4175,7 +4175,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
 
 	/* tell other tasks trying to grab @work to back off */
 	mark_work_canceling(work);
-	local_irq_restore(flags);
+	local_irq_restore(irq_flags);
 
 	/*
 	 * Skip __flush_work() during early boot when we know that @work isn't
@@ -5381,15 +5381,15 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
 
 		activated = false;
 		for_each_pwq(pwq, wq) {
-			unsigned long flags;
+			unsigned long irq_flags;
 
 			/* can be called during early boot w/ irq disabled */
-			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+			raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
 			if (pwq_activate_first_inactive(pwq, true)) {
 				activated = true;
 				kick_pool(pwq->pool);
 			}
-			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+			raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
 		}
 	} while (activated);
 }
@@ -5762,7 +5762,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
 unsigned int work_busy(struct work_struct *work)
 {
 	struct worker_pool *pool;
-	unsigned long flags;
+	unsigned long irq_flags;
 	unsigned int ret = 0;
 
 	if (work_pending(work))
@@ -5771,10 +5771,10 @@ unsigned int work_busy(struct work_struct *work)
 	rcu_read_lock();
 	pool = get_work_pool(work);
 	if (pool) {
-		raw_spin_lock_irqsave(&pool->lock, flags);
+		raw_spin_lock_irqsave(&pool->lock, irq_flags);
 		if (find_worker_executing_work(pool, work))
 			ret |= WORK_BUSY_RUNNING;
-		raw_spin_unlock_irqrestore(&pool->lock, flags);
+		raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
 	}
 	rcu_read_unlock();
 
@@ -6006,7 +6006,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
 {
 	struct pool_workqueue *pwq;
 	bool idle = true;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	for_each_pwq(pwq, wq) {
 		if (!pwq_is_empty(pwq)) {
@@ -6020,7 +6020,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
 	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
 	for_each_pwq(pwq, wq) {
-		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+		raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
 		if (!pwq_is_empty(pwq)) {
 			/*
 			 * Defer printing to avoid deadlocks in console
@@ -6031,7 +6031,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
 			show_pwq(pwq);
 			printk_deferred_exit();
 		}
-		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+		raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
 		/*
 		 * We could be printing a lot from atomic context, e.g.
 		 * sysrq-t -> show_all_workqueues(). Avoid triggering
@@ -6050,10 +6050,10 @@ static void show_one_worker_pool(struct worker_pool *pool)
 {
 	struct worker *worker;
 	bool first = true;
-	unsigned long flags;
+	unsigned long irq_flags;
 	unsigned long hung = 0;
 
-	raw_spin_lock_irqsave(&pool->lock, flags);
+	raw_spin_lock_irqsave(&pool->lock, irq_flags);
 	if (pool->nr_workers == pool->nr_idle)
 		goto next_pool;
 
@@ -6081,7 +6081,7 @@ static void show_one_worker_pool(struct worker_pool *pool)
 	pr_cont("\n");
 	printk_deferred_exit();
 next_pool:
-	raw_spin_unlock_irqrestore(&pool->lock, flags);
+	raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
 	/*
 	 * We could be printing a lot from atomic context, e.g.
 	 * sysrq-t -> show_all_workqueues(). Avoid triggering
@@ -7212,10 +7212,10 @@ static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
 static void show_cpu_pool_hog(struct worker_pool *pool)
 {
 	struct worker *worker;
-	unsigned long flags;
+	unsigned long irq_flags;
 	int bkt;
 
-	raw_spin_lock_irqsave(&pool->lock, flags);
+	raw_spin_lock_irqsave(&pool->lock, irq_flags);
 
 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
 		if (task_is_running(worker->task)) {
@@ -7233,7 +7233,7 @@ static void show_cpu_pool_hog(struct worker_pool *pool)
 		}
 	}
 
-	raw_spin_unlock_irqrestore(&pool->lock, flags);
+	raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
 }
 
 static void show_cpu_pools_hogs(void)
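
Note that the rename also covers the irqsave locking variants in the show_*() and watchdog paths above: raw_spin_lock_irqsave() stores the previous irq state into its second argument just as local_irq_save() does, so `irq_flags` stays accurate there too. A minimal sketch of that pattern, assuming a worker_pool pointer as in show_cpu_pool_hog() (inspect_pool() is a hypothetical name):

	static void inspect_pool(struct worker_pool *pool)
	{
		unsigned long irq_flags;

		/* disables local irqs and records the prior state */
		raw_spin_lock_irqsave(&pool->lock, irq_flags);
		/* ... examine pool state safely here ... */
		raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
	}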
