@@ -2965,6 +2965,7 @@ __acquires(&pool->lock)
 	struct pool_workqueue *pwq = get_work_pwq(work);
 	struct worker_pool *pool = worker->pool;
 	unsigned long work_data;
+	int lockdep_start_depth, rcu_start_depth;
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * It is permissible to free the struct work_struct from
@@ -3027,6 +3028,8 @@ __acquires(&pool->lock)
 	pwq->stats[PWQ_STAT_STARTED]++;
 	raw_spin_unlock_irq(&pool->lock);
 
+	rcu_start_depth = rcu_preempt_depth();
+	lockdep_start_depth = lockdep_depth(current);
 	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	/*
@@ -3062,12 +3065,15 @@ __acquires(&pool->lock)
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);
 
-	if (unlikely(in_atomic() || lockdep_depth(current) > 0 ||
-		     rcu_preempt_depth() > 0)) {
-		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n"
-		       "     last function: %ps\n",
-		       current->comm, preempt_count(), rcu_preempt_depth(),
-		       task_pid_nr(current), worker->current_func);
+	if (unlikely((worker->task && in_atomic()) ||
+		     lockdep_depth(current) != lockdep_start_depth ||
+		     rcu_preempt_depth() != rcu_start_depth)) {
+		pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n"
+		       "     preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n",
+		       current->comm, task_pid_nr(current), preempt_count(),
+		       lockdep_start_depth, lockdep_depth(current),
+		       rcu_start_depth, rcu_preempt_depth(),
+		       worker->current_func);
 		debug_show_held_locks(current);
 		dump_stack();
 	}
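
Aside (not part of the patch): the hunk above changes the leak check from absolute thresholds ("depth > 0") to a before/after comparison against the depths snapshotted right before the work function ran, and only consults in_atomic() when the worker has a task (apparently so a worker that legitimately starts in atomic context is not flagged for that alone). A minimal user-space analogue of that bookkeeping pattern follows; every name in it (demo_lock_depth, run_work, leaky_fn) is invented for illustration and the hand-rolled counter merely stands in for lockdep_depth()/rcu_preempt_depth().

	/* Illustrative sketch only: snapshot a nesting counter before a
	 * callback and warn on any delta, rather than on a nonzero value. */
	#include <stdio.h>

	static int demo_lock_depth;		/* stand-in for lockdep_depth(current) */

	static void demo_lock(void)   { demo_lock_depth++; }
	static void demo_unlock(void) { demo_lock_depth--; }

	static void run_work(const char *name, void (*fn)(void))
	{
		int start_depth = demo_lock_depth;	/* snapshot before the callback */

		fn();

		/* compare against the snapshot, not against zero */
		if (demo_lock_depth != start_depth)
			fprintf(stderr, "BUG: %s leaked lock depth %d->%d\n",
				name, start_depth, demo_lock_depth);
	}

	static void leaky_fn(void) { demo_lock(); /* forgot demo_unlock() */ }

	int main(void)
	{
		run_work("leaky_fn", leaky_fn);
		return 0;
	}
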
@@ -3549,6 +3555,19 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 	return wait;
 }
 
+static void touch_wq_lockdep_map(struct workqueue_struct *wq)
+{
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
+}
+
+static void touch_work_lockdep_map(struct work_struct *work,
+				   struct workqueue_struct *wq)
+{
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
+}
+
 /**
  * __flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
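
Aside (not part of the patch): touch_wq_lockdep_map() and touch_work_lockdep_map() acquire and immediately release a lockdep map without taking any real lock. The point is to record, at flush time, a dependency between whatever the caller already holds and the workqueue's (or work item's) lockdep map, so an illegal flush context shows up as a lockdep report instead of a rare runtime deadlock. Below is a hedged sketch of the kind of bug this pairing catches; the module, the "lockdep_demo" workqueue name, and the function names are invented for illustration, and loading such a module is expected to splat (and genuinely deadlock), since a work item flushing its own ordered workqueue waits on itself.

	/* Illustrative sketch only: do NOT ship this pattern. */
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;	/* hypothetical example wq */
	static struct work_struct demo_work;

	static void bad_work(struct work_struct *work)
	{
		/* Flushing the workqueue we are currently running on:
		 * process_one_work() holds wq->lockdep_map around this
		 * callback and __flush_workqueue() touches the same map,
		 * so lockdep reports the recursion. */
		flush_workqueue(demo_wq);
	}

	static int __init demo_init(void)
	{
		demo_wq = alloc_ordered_workqueue("lockdep_demo", 0);
		if (!demo_wq)
			return -ENOMEM;
		INIT_WORK(&demo_work, bad_work);
		queue_work(demo_wq, &demo_work);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		destroy_workqueue(demo_wq);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
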
@@ -3568,8 +3587,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
 	if (WARN_ON(!wq_online))
 		return;
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
+	touch_wq_lockdep_map(wq);
 
 	mutex_lock(&wq->mutex);
 
@@ -3768,6 +3786,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	struct worker *worker = NULL;
 	struct worker_pool *pool;
 	struct pool_workqueue *pwq;
+	struct workqueue_struct *wq;
 
 	might_sleep();
 
@@ -3791,11 +3810,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 		pwq = worker->current_pwq;
 	}
 
-	check_flush_dependency(pwq->wq, work);
+	wq = pwq->wq;
+	check_flush_dependency(wq, work);
 
 	insert_wq_barrier(pwq, barr, work, worker);
 	raw_spin_unlock_irq(&pool->lock);
 
+	touch_work_lockdep_map(work, wq);
+
 	/*
 	 * Force a lock recursion deadlock when using flush_work() inside a
 	 * single-threaded or rescuer equipped workqueue.
@@ -3805,11 +3827,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	 * workqueues the deadlock happens when the rescuer stalls, blocking
 	 * forward progress.
 	 */
-	if (!from_cancel &&
-	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
-		lock_map_acquire(&pwq->wq->lockdep_map);
-		lock_map_release(&pwq->wq->lockdep_map);
-	}
+	if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
+		touch_wq_lockdep_map(wq);
+
 	rcu_read_unlock();
 	return true;
 already_gone:
@@ -3828,9 +3848,6 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	if (WARN_ON(!work->func))
 		return false;
 
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
 	if (start_flush_work(work, &barr, from_cancel)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);