Commit c35aea3
workqueue: Update lock debugging code
These changes are in preparation of BH workqueue which will execute work
items from BH context.

- Update lock and RCU depth checks in process_one_work() so that it
  remembers and checks against the starting depths, and prints out the
  depth changes.

- Factor out lockdep annotations in the flush paths into
  touch_{wq|work}_lockdep_map(). The work->lockdep_map touching is moved
  from __flush_work() to its callee, start_flush_work(). This brings it
  closer to the wq counterpart and will allow testing the associated wq's
  flags, which will be needed to support BH workqueues. This is not
  expected to cause any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Tested-by: Allen Pais <allen.lkml@gmail.com>
1 parent: d412ace
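As a rough illustration of the first change, here is a minimal userspace
analogue of the snapshot-and-compare check (hypothetical names throughout;
the real code compares lockdep_depth(current) and rcu_preempt_depth(), not a
plain counter). The point is that comparing against a recorded starting
depth, rather than against zero, stays correct when a work item is entered
at a nonzero depth, which matters once work items can run from BH context:

#include <stdio.h>

static int lock_depth;			/* stand-in for lockdep_depth(current) */

static void buggy_work(void)
{
	lock_depth++;			/* "acquires" a lock and never releases it */
}

static void run_one_work(void (*fn)(void))
{
	int start_depth = lock_depth;	/* remember the depth before the item runs */

	fn();

	if (lock_depth != start_depth)	/* report the delta, not an absolute value */
		fprintf(stderr, "BUG: work leaked lock: depth %d->%d\n",
			start_depth, lock_depth);
}

int main(void)
{
	lock_depth = 1;			/* a caller may legitimately start above zero */
	run_one_work(buggy_work);	/* prints: BUG: work leaked lock: depth 1->2 */
	return 0;
}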


kernel/workqueue.c

Lines changed: 34 additions & 17 deletions
@@ -2965,6 +2965,7 @@ __acquires(&pool->lock)
 	struct pool_workqueue *pwq = get_work_pwq(work);
 	struct worker_pool *pool = worker->pool;
 	unsigned long work_data;
+	int lockdep_start_depth, rcu_start_depth;
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * It is permissible to free the struct work_struct from
@@ -3027,6 +3028,8 @@ __acquires(&pool->lock)
 	pwq->stats[PWQ_STAT_STARTED]++;
 	raw_spin_unlock_irq(&pool->lock);
 
+	rcu_start_depth = rcu_preempt_depth();
+	lockdep_start_depth = lockdep_depth(current);
 	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	/*
@@ -3062,12 +3065,15 @@ __acquires(&pool->lock)
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);
 
-	if (unlikely(in_atomic() || lockdep_depth(current) > 0 ||
-		     rcu_preempt_depth() > 0)) {
-		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n"
-		       "     last function: %ps\n",
-		       current->comm, preempt_count(), rcu_preempt_depth(),
-		       task_pid_nr(current), worker->current_func);
+	if (unlikely((worker->task && in_atomic()) ||
+		     lockdep_depth(current) != lockdep_start_depth ||
+		     rcu_preempt_depth() != rcu_start_depth)) {
+		pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n"
+		       "     preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n",
+		       current->comm, task_pid_nr(current), preempt_count(),
+		       lockdep_start_depth, lockdep_depth(current),
+		       rcu_start_depth, rcu_preempt_depth(),
+		       worker->current_func);
 		debug_show_held_locks(current);
 		dump_stack();
 	}
@@ -3549,6 +3555,19 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 	return wait;
 }
 
+static void touch_wq_lockdep_map(struct workqueue_struct *wq)
+{
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
+}
+
+static void touch_work_lockdep_map(struct work_struct *work,
+				   struct workqueue_struct *wq)
+{
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
+}
+
 /**
  * __flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
@@ -3568,8 +3587,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
 	if (WARN_ON(!wq_online))
 		return;
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
+	touch_wq_lockdep_map(wq);
 
 	mutex_lock(&wq->mutex);
 
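For context, an immediately paired lock_map_acquire()/lock_map_release()
never blocks; it only records with lockdep that the current context may wait
on the map's owner, which is what lets lockdep report flush-related deadlock
cycles. A hypothetical sketch of the flush-side call pattern after this
change (example_flush_path is not a real kernel function; the actual wait is
elided):

/*
 * Hypothetical sketch only: shows where the factored-out helpers sit
 * relative to the actual wait in a flush-style path.
 */
static void example_flush_path(struct workqueue_struct *wq,
			       struct work_struct *work)
{
	touch_wq_lockdep_map(wq);		/* "this context may wait on wq" */
	touch_work_lockdep_map(work, wq);	/* "this context may wait on work" */

	/* ... insert a barrier and wait for its completion ... */
}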
@@ -3768,6 +3786,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	struct worker *worker = NULL;
 	struct worker_pool *pool;
 	struct pool_workqueue *pwq;
+	struct workqueue_struct *wq;
 
 	might_sleep();
 
@@ -3791,11 +3810,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 		pwq = worker->current_pwq;
 	}
 
-	check_flush_dependency(pwq->wq, work);
+	wq = pwq->wq;
+	check_flush_dependency(wq, work);
 
 	insert_wq_barrier(pwq, barr, work, worker);
 	raw_spin_unlock_irq(&pool->lock);
 
+	touch_work_lockdep_map(work, wq);
+
 	/*
 	 * Force a lock recursion deadlock when using flush_work() inside a
 	 * single-threaded or rescuer equipped workqueue.
@@ -3805,11 +3827,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	 * workqueues the deadlock happens when the rescuer stalls, blocking
 	 * forward progress.
 	 */
-	if (!from_cancel &&
-	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
-		lock_map_acquire(&pwq->wq->lockdep_map);
-		lock_map_release(&pwq->wq->lockdep_map);
-	}
+	if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
+		touch_wq_lockdep_map(wq);
+
 	rcu_read_unlock();
 	return true;
 already_gone:
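The comment above names the scenario this annotation is meant to surface. A
hedged sketch of that deadlock class (the work items and their shared
ordered workqueue are hypothetical):

/*
 * Hypothetical example of the deadlock the annotation forces lockdep to
 * see: on a single-threaded (ordered) workqueue, work A flushing work B
 * queued on the same workqueue can never finish, because B cannot start
 * until A returns.
 */
static void work_b_fn(struct work_struct *work) { }
static DECLARE_WORK(work_b, work_b_fn);

static void work_a_fn(struct work_struct *work)
{
	flush_work(&work_b);	/* never completes if work_b shares our ordered wq */
}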
@@ -3828,9 +3848,6 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	if (WARN_ON(!work->func))
 		return false;
 
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
 	if (start_flush_work(work, &barr, from_cancel)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);
