Commit cdc6e4b

workqueue: Reorganize flush and cancel[_sync] functions

They are currently a bit disorganized, with flush and cancel functions
mixed. Reorganize them so that the flush functions come first, cancel
next and cancel_sync last. This way, we won't have to add prototypes
for internal functions for the planned disable/enable support.

This is pure code reorganization. No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
1 parent c514068 commit cdc6e4b
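The "prototypes" remark refers to C's declaration-before-use rule: with the static helpers grouped ahead of every caller, functions added later for the disable/enable support can call them without forward declarations. A schematic sketch of that rule, using hypothetical names not taken from this commit:

#include <linux/workqueue.h>

/* Helper defined first, so the caller below needs no prototype. */
static bool __example_helper(struct work_struct *work, bool is_dwork)
{
        return false;   /* placeholder body */
}

bool example_caller(struct work_struct *work)
{
        /*
         * If this function were defined above __example_helper(),
         * a forward declaration of the helper would be required.
         */
        return __example_helper(work, false);
}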


kernel/workqueue.c

Lines changed: 68 additions & 68 deletions
@@ -4061,6 +4061,65 @@ bool flush_work(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+/**
+ * flush_delayed_work - wait for a dwork to finish executing the last queueing
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * immediate execution.  Like flush_work(), this function only
+ * considers the last queueing instance of @dwork.
+ *
+ * Return:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work(struct delayed_work *dwork)
+{
+        local_irq_disable();
+        if (del_timer_sync(&dwork->timer))
+                __queue_work(dwork->cpu, dwork->wq, &dwork->work);
+        local_irq_enable();
+        return flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
+ * flush_rcu_work - wait for a rwork to finish executing the last queueing
+ * @rwork: the rcu work to flush
+ *
+ * Return:
+ * %true if flush_rcu_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_rcu_work(struct rcu_work *rwork)
+{
+        if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
+                rcu_barrier();
+                flush_work(&rwork->work);
+                return true;
+        } else {
+                return flush_work(&rwork->work);
+        }
+}
+EXPORT_SYMBOL(flush_rcu_work);
+
+static bool __cancel_work(struct work_struct *work, bool is_dwork)
+{
+        unsigned long flags;
+        int ret;
+
+        do {
+                ret = try_to_grab_pending(work, is_dwork, &flags);
+        } while (unlikely(ret == -EAGAIN));
+
+        if (unlikely(ret < 0))
+                return false;
+
+        set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+        local_irq_restore(flags);
+        return ret;
+}
+
 struct cwt_wait {
         wait_queue_entry_t      wait;
         struct work_struct      *work;
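For context on the functions this hunk moves, here is a minimal caller-side sketch of the flush API; the my_* identifiers are illustrative and not part of this commit:

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* deferred processing */
}

static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void my_teardown(void)
{
        /*
         * Cancel the timer, queue any pending instance immediately,
         * and wait for it to finish executing; returns true if it
         * had to wait, false if the dwork was already idle.
         */
        flush_delayed_work(&my_dwork);
}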
@@ -4139,6 +4198,15 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
         return ret;
 }
 
+/*
+ * See cancel_delayed_work()
+ */
+bool cancel_work(struct work_struct *work)
+{
+        return __cancel_work(work, false);
+}
+EXPORT_SYMBOL(cancel_work);
+
 /**
  * cancel_work_sync - cancel a work and wait for it to finish
  * @work: the work to cancel
@@ -4163,74 +4231,6 @@ bool cancel_work_sync(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
-/**
- * flush_delayed_work - wait for a dwork to finish executing the last queueing
- * @dwork: the delayed work to flush
- *
- * Delayed timer is cancelled and the pending work is queued for
- * immediate execution.  Like flush_work(), this function only
- * considers the last queueing instance of @dwork.
- *
- * Return:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_delayed_work(struct delayed_work *dwork)
-{
-        local_irq_disable();
-        if (del_timer_sync(&dwork->timer))
-                __queue_work(dwork->cpu, dwork->wq, &dwork->work);
-        local_irq_enable();
-        return flush_work(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work);
-
-/**
- * flush_rcu_work - wait for a rwork to finish executing the last queueing
- * @rwork: the rcu work to flush
- *
- * Return:
- * %true if flush_rcu_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_rcu_work(struct rcu_work *rwork)
-{
-        if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
-                rcu_barrier();
-                flush_work(&rwork->work);
-                return true;
-        } else {
-                return flush_work(&rwork->work);
-        }
-}
-EXPORT_SYMBOL(flush_rcu_work);
-
-static bool __cancel_work(struct work_struct *work, bool is_dwork)
-{
-        unsigned long flags;
-        int ret;
-
-        do {
-                ret = try_to_grab_pending(work, is_dwork, &flags);
-        } while (unlikely(ret == -EAGAIN));
-
-        if (unlikely(ret < 0))
-                return false;
-
-        set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-        local_irq_restore(flags);
-        return ret;
-}
-
-/*
- * See cancel_delayed_work()
- */
-bool cancel_work(struct work_struct *work)
-{
-        return __cancel_work(work, false);
-}
-EXPORT_SYMBOL(cancel_work);
-
 /**
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
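After the move, the file reads in layered order: the flush family (flush_work(), flush_delayed_work(), flush_rcu_work()), then the non-waiting cancels built on __cancel_work(), then the _sync variants. To make the two cancel flavors concrete, a hedged usage sketch; my_work and my_stop() are illustrative only:

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* deferred processing */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_stop(void)
{
        /*
         * cancel_work() only drops a pending (not yet running)
         * instance; it can return while my_work_fn() is still
         * executing on another CPU.
         */
        cancel_work(&my_work);

        /*
         * cancel_work_sync() also waits for a running instance to
         * finish, so it may sleep and must not be called from
         * atomic context.
         */
        cancel_work_sync(&my_work);
}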
