
Commit 6dc6767

htejun authored and gregkh committed
workqueue: Update lock debugging code
[ Upstream commit c35aea3 ]

These changes are in preparation of BH workqueue which will execute work
items from BH context.

- Update lock and RCU depth checks in process_one_work() so that it
  remembers and checks against the starting depths and prints out the
  depth changes.

- Factor out lockdep annotations in the flush paths into
  touch_{wq|work}_lockdep_map(). The work->lockdep_map touching is moved
  from __flush_work() to its callee - start_flush_work(). This brings it
  closer to the wq counterpart and will allow testing the associated wq's
  flags which will be needed to support BH workqueues.

This is not expected to cause any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Tested-by: Allen Pais <allen.lkml@gmail.com>
Stable-dep-of: de35994 ("workqueue: Do not warn when cancelling WQ_MEM_RECLAIM work from !WQ_MEM_RECLAIM worker")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 2717b5e commit 6dc6767
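
As a rough illustration of why the flush paths "touch" wq->lockdep_map, here is a minimal, hypothetical test module (not part of this commit; all selfflush_* names are made up): it flushes an ordered workqueue from one of its own work items, which can never complete. Because process_one_work() acquires the wq's lockdep_map around the work function and the flush path acquires/releases it as well, lockdep can report the recursion instead of leaving an undiagnosed hang.

/*
 * Hypothetical example, assuming CONFIG_PROVE_LOCKING; not part of the
 * commit. The selfflush_* names are invented for illustration.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *selfflush_wq;   /* ordered: max_active == 1 */
static struct work_struct selfflush_work;

static void selfflush_fn(struct work_struct *work)
{
        /*
         * Flushing the workqueue we are currently executing on.  The
         * wq->lockdep_map "acquired" by process_one_work() is still held
         * here, so the acquire done on the flush path makes lockdep emit
         * a recursion (deadlock) report.
         */
        flush_workqueue(selfflush_wq);
}

static int __init selfflush_init(void)
{
        selfflush_wq = alloc_ordered_workqueue("selfflush_wq", 0);
        if (!selfflush_wq)
                return -ENOMEM;

        INIT_WORK(&selfflush_work, selfflush_fn);
        queue_work(selfflush_wq, &selfflush_work);
        return 0;
}

static void __exit selfflush_exit(void)
{
        destroy_workqueue(selfflush_wq);
}

module_init(selfflush_init);
module_exit(selfflush_exit);
MODULE_LICENSE("GPL");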

1 file changed: 34 additions (+), 17 deletions (-)

kernel/workqueue.c

Lines changed: 34 additions & 17 deletions
@@ -2541,6 +2541,7 @@ __acquires(&pool->lock)
         struct pool_workqueue *pwq = get_work_pwq(work);
         struct worker_pool *pool = worker->pool;
         unsigned long work_data;
+        int lockdep_start_depth, rcu_start_depth;
 #ifdef CONFIG_LOCKDEP
         /*
          * It is permissible to free the struct work_struct from
@@ -2603,6 +2604,8 @@ __acquires(&pool->lock)
         pwq->stats[PWQ_STAT_STARTED]++;
         raw_spin_unlock_irq(&pool->lock);
 
+        rcu_start_depth = rcu_preempt_depth();
+        lockdep_start_depth = lockdep_depth(current);
         lock_map_acquire(&pwq->wq->lockdep_map);
         lock_map_acquire(&lockdep_map);
         /*
@@ -2638,12 +2641,15 @@ __acquires(&pool->lock)
         lock_map_release(&lockdep_map);
         lock_map_release(&pwq->wq->lockdep_map);
 
-        if (unlikely(in_atomic() || lockdep_depth(current) > 0 ||
-                     rcu_preempt_depth() > 0)) {
-                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n"
-                       "     last function: %ps\n",
-                       current->comm, preempt_count(), rcu_preempt_depth(),
-                       task_pid_nr(current), worker->current_func);
+        if (unlikely((worker->task && in_atomic()) ||
+                     lockdep_depth(current) != lockdep_start_depth ||
+                     rcu_preempt_depth() != rcu_start_depth)) {
+                pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n"
+                       "     preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n",
+                       current->comm, task_pid_nr(current), preempt_count(),
+                       lockdep_start_depth, lockdep_depth(current),
+                       rcu_start_depth, rcu_preempt_depth(),
+                       worker->current_func);
                 debug_show_held_locks(current);
                 dump_stack();
         }
@@ -3123,6 +3129,19 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
         return wait;
 }
 
+static void touch_wq_lockdep_map(struct workqueue_struct *wq)
+{
+        lock_map_acquire(&wq->lockdep_map);
+        lock_map_release(&wq->lockdep_map);
+}
+
+static void touch_work_lockdep_map(struct work_struct *work,
+                                   struct workqueue_struct *wq)
+{
+        lock_map_acquire(&work->lockdep_map);
+        lock_map_release(&work->lockdep_map);
+}
+
 /**
  * __flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
@@ -3142,8 +3161,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
         if (WARN_ON(!wq_online))
                 return;
 
-        lock_map_acquire(&wq->lockdep_map);
-        lock_map_release(&wq->lockdep_map);
+        touch_wq_lockdep_map(wq);
 
         mutex_lock(&wq->mutex);
 
@@ -3342,6 +3360,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
         struct worker *worker = NULL;
         struct worker_pool *pool;
         struct pool_workqueue *pwq;
+        struct workqueue_struct *wq;
 
         might_sleep();
 
@@ -3365,11 +3384,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                 pwq = worker->current_pwq;
         }
 
-        check_flush_dependency(pwq->wq, work);
+        wq = pwq->wq;
+        check_flush_dependency(wq, work);
 
         insert_wq_barrier(pwq, barr, work, worker);
         raw_spin_unlock_irq(&pool->lock);
 
+        touch_work_lockdep_map(work, wq);
+
         /*
          * Force a lock recursion deadlock when using flush_work() inside a
          * single-threaded or rescuer equipped workqueue.
@@ -3379,11 +3401,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
          * workqueues the deadlock happens when the rescuer stalls, blocking
          * forward progress.
          */
-        if (!from_cancel &&
-            (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
-                lock_map_acquire(&pwq->wq->lockdep_map);
-                lock_map_release(&pwq->wq->lockdep_map);
-        }
+        if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
+                touch_wq_lockdep_map(wq);
+
         rcu_read_unlock();
         return true;
 already_gone:
@@ -3402,9 +3422,6 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
         if (WARN_ON(!work->func))
                 return false;
 
-        lock_map_acquire(&work->lockdep_map);
-        lock_map_release(&work->lockdep_map);
-
         if (start_flush_work(work, &barr, from_cancel)) {
                 wait_for_completion(&barr.done);
                 destroy_work_on_stack(&barr.work);
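
For the updated leak check in process_one_work(), here is a sketch of the kind of bug it reports (hypothetical, not from the commit; leaky_* names are made up): a work function that returns with a lock still held leaves lockdep_depth(current) above lockdep_start_depth, so the new message prints the before/after depths (e.g. lock=0->1) together with preempt_count() and the RCU depth change.

#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical buggy work item, assuming CONFIG_LOCKDEP for the depth report. */
static DEFINE_SPINLOCK(leaky_lock);

static void leaky_work_fn(struct work_struct *work)
{
        spin_lock(&leaky_lock);
        /*
         * Missing spin_unlock(&leaky_lock): the work function returns with
         * the lock held, so lockdep_depth(current) no longer matches the
         * depth recorded before the work item ran, and process_one_work()
         * prints "BUG: workqueue leaked atomic, lock or RCU: ..." followed
         * by debug_show_held_locks() and dump_stack().
         */
}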
