@@ -96,6 +96,10 @@ enum worker_flags {
 				  WORKER_UNBOUND | WORKER_REBOUND,
 };
 
+enum work_cancel_flags {
+	WORK_CANCEL_DELAYED	= 1 << 0,	/* canceling a delayed_work */
+};
+
 enum wq_internal_consts {
 	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */
 
@@ -2028,7 +2032,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
 /**
  * try_to_grab_pending - steal work item from worklist and disable irq
  * @work: work item to steal
- * @is_dwork: @work is a delayed_work
+ * @cflags: %WORK_CANCEL_ flags
  * @irq_flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
@@ -2055,7 +2059,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
  *
  * This function is safe to call from any context including IRQ handler.
  */
-static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 			       unsigned long *irq_flags)
 {
 	struct worker_pool *pool;
@@ -2064,7 +2068,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 	local_irq_save(*irq_flags);
 
 	/* try to steal the timer if it exists */
-	if (is_dwork) {
+	if (cflags & WORK_CANCEL_DELAYED) {
 		struct delayed_work *dwork = to_delayed_work(work);
 
 		/*
@@ -2543,7 +2547,8 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(&dwork->work, true, &irq_flags);
+		ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED,
+					  &irq_flags);
 	} while (unlikely(ret == -EAGAIN));
 
 	if (likely(ret >= 0)) {
@@ -4103,13 +4108,13 @@ bool flush_rcu_work(struct rcu_work *rwork)
 }
 EXPORT_SYMBOL(flush_rcu_work);
 
-static bool __cancel_work(struct work_struct *work, bool is_dwork)
+static bool __cancel_work(struct work_struct *work, u32 cflags)
 {
 	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(work, is_dwork, &irq_flags);
+		ret = try_to_grab_pending(work, cflags, &irq_flags);
 	} while (unlikely(ret == -EAGAIN));
 
 	if (unlikely(ret < 0))
@@ -4134,14 +4139,14 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k
 	return autoremove_wake_function(wait, mode, sync, key);
 }
 
-static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
+static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 {
 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(work, is_dwork, &irq_flags);
+		ret = try_to_grab_pending(work, cflags, &irq_flags);
 		/*
 		 * If someone else is already canceling, wait for it to
 		 * finish. flush_work() doesn't work for PREEMPT_NONE
@@ -4203,7 +4208,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
  */
 bool cancel_work(struct work_struct *work)
 {
-	return __cancel_work(work, false);
+	return __cancel_work(work, 0);
 }
 EXPORT_SYMBOL(cancel_work);
 
@@ -4227,7 +4232,7 @@ EXPORT_SYMBOL(cancel_work);
  */
 bool cancel_work_sync(struct work_struct *work)
 {
-	return __cancel_work_sync(work, false);
+	return __cancel_work_sync(work, 0);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
@@ -4249,7 +4254,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  */
 bool cancel_delayed_work(struct delayed_work *dwork)
 {
-	return __cancel_work(&dwork->work, true);
+	return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED);
 }
 EXPORT_SYMBOL(cancel_delayed_work);
 
@@ -4264,7 +4269,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
-	return __cancel_work_sync(&dwork->work, true);
+	return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED);
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
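
The patch replaces a single bool parameter with a u32 of named flag bits, so the cancel path can grow additional modes later without another round of signature churn. As a rough illustration of why named bits compose better than stacked bools, here is a minimal user-space C sketch; the names (cancel_flags, CANCEL_DELAYED, do_cancel) are hypothetical and are not the kernel code:

/*
 * Sketch of the bool-to-bitflags conversion pattern (assumed names,
 * not the kernel implementation). A u32 of named bits replaces a bool
 * parameter, so future modes become new bits instead of new arguments.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum cancel_flags {
	CANCEL_DELAYED	= 1 << 0,	/* target is a delayed item */
	/* future bits (e.g. 1 << 1) slot in here without API changes */
};

static bool do_cancel(uint32_t cflags)
{
	/* test a specific mode bit instead of a positional bool */
	if (cflags & CANCEL_DELAYED)
		printf("also stealing the pending timer\n");
	printf("clearing pending state\n");
	return true;
}

int main(void)
{
	do_cancel(0);			/* plain work item */
	do_cancel(CANCEL_DELAYED);	/* delayed work item */
	return 0;
}

With a bool, each new mode would add another true/false argument at every call site; with named bits, existing callers keep passing 0 or CANCEL_DELAYED unchanged, which mirrors how cancel_work() passes 0 and cancel_delayed_work() passes WORK_CANCEL_DELAYED in the diff above.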