Skip to content

Commit bccdc1f

Browse files
committed
workqueue: Make @flags handling consistent across set_work_data() and friends
- set_work_data() takes a separate @flags argument but just ORs it to @data. This is more confusing than helpful. Just take @data. - Use the name @flags consistently and add the parameter to set_work_pool_and_{keep|clear}_pending(). This will be used by the planned disable/enable support. No functional changes. Signed-off-by: Tejun Heo <tj@kernel.org> Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
1 parent afe928c commit bccdc1f

File tree

1 file changed

+16
-16
lines changed

1 file changed

+16
-16
lines changed

kernel/workqueue.c

Lines changed: 16 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -777,29 +777,28 @@ static int work_next_color(int color)
777777
* but stay off timer and worklist for arbitrarily long and nobody should
778778
* try to steal the PENDING bit.
779779
*/
780-
static inline void set_work_data(struct work_struct *work, unsigned long data,
781-
unsigned long flags)
780+
static inline void set_work_data(struct work_struct *work, unsigned long data)
782781
{
783782
WARN_ON_ONCE(!work_pending(work));
784-
atomic_long_set(&work->data, data | flags | work_static(work));
783+
atomic_long_set(&work->data, data | work_static(work));
785784
}
786785

787786
static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
788-
unsigned long extra_flags)
787+
unsigned long flags)
789788
{
790-
set_work_data(work, (unsigned long)pwq,
791-
WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
789+
set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
790+
WORK_STRUCT_PWQ | flags);
792791
}
793792

794793
static void set_work_pool_and_keep_pending(struct work_struct *work,
795-
int pool_id)
794+
int pool_id, unsigned long flags)
796795
{
797-
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
798-
WORK_STRUCT_PENDING);
796+
set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
797+
WORK_STRUCT_PENDING | flags);
799798
}
800799

801800
static void set_work_pool_and_clear_pending(struct work_struct *work,
802-
int pool_id)
801+
int pool_id, unsigned long flags)
803802
{
804803
/*
805804
* The following wmb is paired with the implied mb in
@@ -808,7 +807,8 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
808807
* owner.
809808
*/
810809
smp_wmb();
811-
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
810+
set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
811+
flags);
812812
/*
813813
* The following mb guarantees that previous clear of a PENDING bit
814814
* will not be reordered with any speculative LOADS or STORES from
@@ -909,7 +909,7 @@ static void mark_work_canceling(struct work_struct *work)
909909
unsigned long pool_id = get_work_pool_id(work);
910910

911911
pool_id <<= WORK_OFFQ_POOL_SHIFT;
912-
set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
912+
set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING);
913913
}
914914

915915
static bool work_is_canceling(struct work_struct *work)
@@ -2127,7 +2127,7 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
21272127
* this destroys work->data needed by the next step, stash it.
21282128
*/
21292129
work_data = *work_data_bits(work);
2130-
set_work_pool_and_keep_pending(work, pool->id);
2130+
set_work_pool_and_keep_pending(work, pool->id, 0);
21312131

21322132
/* must be the last step, see the function comment */
21332133
pwq_dec_nr_in_flight(pwq, work_data);
@@ -3205,7 +3205,7 @@ __acquires(&pool->lock)
32053205
* PENDING and queued state changes happen together while IRQ is
32063206
* disabled.
32073207
*/
3208-
set_work_pool_and_clear_pending(work, pool->id);
3208+
set_work_pool_and_clear_pending(work, pool->id, 0);
32093209

32103210
pwq->stats[PWQ_STAT_STARTED]++;
32113211
raw_spin_unlock_irq(&pool->lock);
@@ -4188,7 +4188,7 @@ static bool __cancel_work(struct work_struct *work, u32 cflags)
41884188
if (unlikely(ret < 0))
41894189
return false;
41904190

4191-
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
4191+
set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0);
41924192
local_irq_restore(irq_flags);
41934193
return ret;
41944194
}
@@ -4215,7 +4215,7 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
42154215
* with prepare_to_wait() above so that either waitqueue_active() is
42164216
* visible here or !work_is_canceling() is visible there.
42174217
*/
4218-
set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE);
4218+
set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0);
42194219

42204220
if (waitqueue_active(&wq_cancel_waitq))
42214221
__wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);

0 commit comments

Comments
 (0)