
Commit 978b840

workqueue: Factor out work_grab_pending() from __cancel_work_sync()
The planned disable/enable support will need the same logic. Let's factor it
out. No functional changes.

v2: Update function comment to include @irq_flags.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
1 parent e9a8e01 commit 978b840
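
For orientation, a minimal sketch of the calling contract the new helper exposes, based on the work_grab_pending() comment in the diff below. The caller name example_grab_and_release() and the cflags value of 0 are illustrative assumptions, not part of this patch:

/*
 * Hypothetical caller (illustration only, not part of this patch).
 * Shows the contract documented on work_grab_pending(): process
 * context, may sleep while another task is canceling @work, and
 * returns with IRQs disabled and the previous state in irq_flags,
 * which the caller must restore with local_irq_restore().
 */
static bool example_grab_and_release(struct work_struct *work)
{
	unsigned long irq_flags;
	bool was_pending;

	/* cflags = 0: plain work item, no %WORK_CANCEL_ flags (assumption) */
	was_pending = work_grab_pending(work, 0, &irq_flags);

	/*
	 * @work now holds the PENDING bit and is off any timer or
	 * worklist, so it cannot be re-queued behind our back until we
	 * release it.
	 */

	local_irq_restore(irq_flags);
	return was_pending;	/* %true if @work was pending, %false if idle */
}

The planned disable/enable support and the reworked __cancel_work_sync() below both follow this pattern: grab PENDING, act on the now-stable work item, then restore the saved IRQ state.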

1 file changed, 80 insertions(+), 52 deletions(-)


kernel/workqueue.c

@@ -484,6 +484,12 @@ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 /* I: attributes used when instantiating ordered pools on demand */
 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 
+/*
+ * Used to synchronize multiple cancel_sync attempts on the same work item. See
+ * work_grab_pending() and __cancel_work_sync().
+ */
+static DECLARE_WAIT_QUEUE_HEAD(wq_cancel_waitq);
+
 /*
  * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
  * process context while holding a pool lock. Bounce to a dedicated kthread
@@ -2147,6 +2153,75 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 	return -EAGAIN;
 }
 
+struct cwt_wait {
+	wait_queue_entry_t	wait;
+	struct work_struct	*work;
+};
+
+static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+{
+	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+	if (cwait->work != key)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
+/**
+ * work_grab_pending - steal work item from worklist and disable irq
+ * @work: work item to steal
+ * @cflags: %WORK_CANCEL_ flags
+ * @irq_flags: place to store IRQ state
+ *
+ * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer
+ * or on worklist.
+ *
+ * Must be called in process context. IRQ is disabled on return with IRQ state
+ * stored in *@irq_flags. The caller is responsible for re-enabling it using
+ * local_irq_restore().
+ *
+ * Returns %true if @work was pending. %false if idle.
+ */
+static bool work_grab_pending(struct work_struct *work, u32 cflags,
+			      unsigned long *irq_flags)
+{
+	struct cwt_wait cwait;
+	int ret;
+
+	might_sleep();
+repeat:
+	ret = try_to_grab_pending(work, cflags, irq_flags);
+	if (likely(ret >= 0))
+		return ret;
+	if (ret != -ENOENT)
+		goto repeat;
+
+	/*
+	 * Someone is already canceling. Wait for it to finish. flush_work()
+	 * doesn't work for PREEMPT_NONE because we may get woken up between
+	 * @work's completion and the other canceling task resuming and clearing
+	 * CANCELING - flush_work() will return false immediately as @work is no
+	 * longer busy, try_to_grab_pending() will return -ENOENT as @work is
+	 * still being canceled and the other canceling task won't be able to
+	 * clear CANCELING as we're hogging the CPU.
+	 *
+	 * Let's wait for completion using a waitqueue. As this may lead to the
+	 * thundering herd problem, use a custom wake function which matches
+	 * @work along with exclusive wait and wakeup.
+	 */
+	init_wait(&cwait.wait);
+	cwait.wait.func = cwt_wakefn;
+	cwait.work = work;
+
+	prepare_to_wait_exclusive(&wq_cancel_waitq, &cwait.wait,
+				  TASK_UNINTERRUPTIBLE);
+	if (work_is_canceling(work))
+		schedule();
+	finish_wait(&wq_cancel_waitq, &cwait.wait);
+
+	goto repeat;
+}
+
 /**
  * insert_work - insert a work into a pool
  * @pwq: pwq @work belongs to
@@ -4125,60 +4200,13 @@ static bool __cancel_work(struct work_struct *work, u32 cflags)
 	return ret;
 }
 
-struct cwt_wait {
-	wait_queue_entry_t	wait;
-	struct work_struct	*work;
-};
-
-static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
-{
-	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
-
-	if (cwait->work != key)
-		return 0;
-	return autoremove_wake_function(wait, mode, sync, key);
-}
-
 static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 {
-	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long irq_flags;
-	int ret;
-
-	do {
-		ret = try_to_grab_pending(work, cflags, &irq_flags);
-		/*
-		 * If someone else is already canceling, wait for it to
-		 * finish. flush_work() doesn't work for PREEMPT_NONE
-		 * because we may get scheduled between @work's completion
-		 * and the other canceling task resuming and clearing
-		 * CANCELING - flush_work() will return false immediately
-		 * as @work is no longer busy, try_to_grab_pending() will
-		 * return -ENOENT as @work is still being canceled and the
-		 * other canceling task won't be able to clear CANCELING as
-		 * we're hogging the CPU.
-		 *
-		 * Let's wait for completion using a waitqueue. As this
-		 * may lead to the thundering herd problem, use a custom
-		 * wake function which matches @work along with exclusive
-		 * wait and wakeup.
-		 */
-		if (unlikely(ret == -ENOENT)) {
-			struct cwt_wait cwait;
-
-			init_wait(&cwait.wait);
-			cwait.wait.func = cwt_wakefn;
-			cwait.work = work;
-
-			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
-						  TASK_UNINTERRUPTIBLE);
-			if (work_is_canceling(work))
-				schedule();
-			finish_wait(&cancel_waitq, &cwait.wait);
-		}
-	} while (unlikely(ret < 0));
+	bool ret;
 
-	/* tell other tasks trying to grab @work to back off */
+	/* claim @work and tell other tasks trying to grab @work to back off */
+	ret = work_grab_pending(work, cflags, &irq_flags);
 	mark_work_canceling(work);
 	local_irq_restore(irq_flags);
 
@@ -4197,8 +4225,8 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 	 * visible there.
 	 */
 	smp_mb();
-	if (waitqueue_active(&cancel_waitq))
-		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+	if (waitqueue_active(&wq_cancel_waitq))
+		__wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
 
 	return ret;
 }
