@@ -777,29 +777,28 @@ static int work_next_color(int color)
  * but stay off timer and worklist for arbitrarily long and nobody should
  * try to steal the PENDING bit.
  */
-static inline void set_work_data(struct work_struct *work, unsigned long data,
-				 unsigned long flags)
+static inline void set_work_data(struct work_struct *work, unsigned long data)
 {
 	WARN_ON_ONCE(!work_pending(work));
-	atomic_long_set(&work->data, data | flags | work_static(work));
+	atomic_long_set(&work->data, data | work_static(work));
 }
 
 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
-			 unsigned long extra_flags)
+			 unsigned long flags)
 {
-	set_work_data(work, (unsigned long)pwq,
-		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
+	set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
+		      WORK_STRUCT_PWQ | flags);
 }
 
 static void set_work_pool_and_keep_pending(struct work_struct *work,
-					   int pool_id)
+					   int pool_id, unsigned long flags)
 {
-	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
-		      WORK_STRUCT_PENDING);
+	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
+		      WORK_STRUCT_PENDING | flags);
 }
 
 static void set_work_pool_and_clear_pending(struct work_struct *work,
-					    int pool_id)
+					    int pool_id, unsigned long flags)
 {
 	/*
 	 * The following wmb is paired with the implied mb in
@@ -808,7 +807,8 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 * owner.
 	 */
 	smp_wmb();
-	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
+		      flags);
 	/*
 	 * The following mb guarantees that previous clear of a PENDING bit
 	 * will not be reordered with any speculative LOADS or STORES from
@@ -909,7 +909,7 @@ static void mark_work_canceling(struct work_struct *work)
 	unsigned long pool_id = get_work_pool_id(work);
 
 	pool_id <<= WORK_OFFQ_POOL_SHIFT;
-	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
+	set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
@@ -2127,7 +2127,7 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 	 * this destroys work->data needed by the next step, stash it.
 	 */
 	work_data = *work_data_bits(work);
-	set_work_pool_and_keep_pending(work, pool->id);
+	set_work_pool_and_keep_pending(work, pool->id, 0);
 
 	/* must be the last step, see the function comment */
 	pwq_dec_nr_in_flight(pwq, work_data);
@@ -3205,7 +3205,7 @@ __acquires(&pool->lock)
 	 * PENDING and queued state changes happen together while IRQ is
 	 * disabled.
 	 */
-	set_work_pool_and_clear_pending(work, pool->id);
+	set_work_pool_and_clear_pending(work, pool->id, 0);
 
 	pwq->stats[PWQ_STAT_STARTED]++;
 	raw_spin_unlock_irq(&pool->lock);
@@ -4188,7 +4188,7 @@ static bool __cancel_work(struct work_struct *work, u32 cflags)
 	if (unlikely(ret < 0))
 		return false;
 
-	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+	set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0);
 	local_irq_restore(irq_flags);
 	return ret;
 }
@@ -4215,7 +4215,7 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 	 * with prepare_to_wait() above so that either waitqueue_active() is
 	 * visible here or !work_is_canceling() is visible there.
 	 */
-	set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE);
+	set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0);
 
 	if (waitqueue_active(&wq_cancel_waitq))
 		__wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
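
The net effect of these hunks: set_work_data() no longer takes a separate flags
argument, so each caller ORs its payload (a pwq pointer or a shifted pool ID)
together with the flag bits into a single word, and the
set_work_pool_and_{keep,clear}_pending() helpers gain a flags parameter so that
cancel paths can carry extra off-queue (OFFQ) bits through. Below is a minimal
user-space sketch of this single-word packing; the constant values and the
plain unsigned long standing in for the kernel's atomic_long_t are assumptions
for illustration, not the kernel's real definitions.

	#include <stdio.h>

	/*
	 * Illustrative bit layout, loosely modeled on the kernel's
	 * WORK_STRUCT_* / WORK_OFFQ_* constants. The values are made up
	 * for this sketch.
	 */
	#define WORK_STRUCT_PENDING	(1UL << 0)
	#define WORK_OFFQ_CANCELING	(1UL << 4)
	#define WORK_OFFQ_POOL_SHIFT	5

	struct work_struct {
		unsigned long data;	/* stand-in for atomic_long_t */
	};

	/*
	 * After the patch, set_work_data() takes one pre-combined word:
	 * the caller has already ORed payload and flag bits together.
	 */
	static void set_work_data(struct work_struct *work, unsigned long data)
	{
		work->data = data;
	}

	static void set_work_pool_and_keep_pending(struct work_struct *work,
						   int pool_id,
						   unsigned long flags)
	{
		set_work_data(work,
			      ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
			      WORK_STRUCT_PENDING | flags);
	}

	int main(void)
	{
		struct work_struct w = { 0 };

		/* Record pool 3, keep PENDING, and preserve one OFFQ bit. */
		set_work_pool_and_keep_pending(&w, 3, WORK_OFFQ_CANCELING);

		/* Prints: data = 0x71, pool = 3, pending = 1 */
		printf("data = %#lx, pool = %lu, pending = %lu\n",
		       w.data,
		       w.data >> WORK_OFFQ_POOL_SHIFT,
		       w.data & WORK_STRUCT_PENDING);
		return 0;
	}

Folding the flags into the data word means there is exactly one place where
the bits are combined per call site, which is what lets callers like
mark_work_canceling() pass OFFQ bits alongside WORK_STRUCT_PENDING without a
second parameter on set_work_data().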