@@ -599,27 +599,34 @@ static bool is_async(struct device *dev)
 
 static bool dpm_async_fn(struct device *dev, async_func_t func)
 {
-	reinit_completion(&dev->power.completion);
+	if (!is_async(dev))
+		return false;
 
-	if (is_async(dev)) {
-		dev->power.work_in_progress = true;
+	dev->power.work_in_progress = true;
 
-		get_device(dev);
+	get_device(dev);
 
-		if (async_schedule_dev_nocall(func, dev))
-			return true;
+	if (async_schedule_dev_nocall(func, dev))
+		return true;
+
+	put_device(dev);
 
-		put_device(dev);
-	}
 	/*
-	 * Because async_schedule_dev_nocall() above has returned false or it
-	 * has not been called at all, func() is not running and it is safe to
-	 * update the work_in_progress flag without extra synchronization.
+	 * async_schedule_dev_nocall() above has returned false, so func() is
+	 * not running and it is safe to update power.work_in_progress without
+	 * extra synchronization.
 	 */
 	dev->power.work_in_progress = false;
+
 	return false;
 }
 
+static void dpm_clear_async_state(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+	dev->power.work_in_progress = false;
+}
+
 /**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
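The hunk above splits the old helper in two: dpm_async_fn() now only decides whether to schedule the async handler, while the new dpm_clear_async_state() resets the completion and the work_in_progress flag, so the callers in the later hunks can reset that state for every device on a list, not just the ones that end up scheduled asynchronously. Below is a minimal userspace mock of the reworked control flow, for illustration only: struct device, is_async(), get_device(), put_device() and async_schedule_dev_nocall() are simplified stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

struct device {
	struct {
		bool work_in_progress;
		bool async_enabled;	/* stand-in for the real is_async() checks */
	} power;
	int refcount;
};

typedef void (*async_func_t)(void *data, int cookie);

static bool is_async(struct device *dev)
{
	return dev->power.async_enabled;
}

static void get_device(struct device *dev) { dev->refcount++; }
static void put_device(struct device *dev) { dev->refcount--; }

/* Mock scheduler: report failure so the fallback path below is exercised. */
static bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
	(void)func;
	(void)dev;
	return false;
}

/* Reworked flow: bail out early for non-async devices. */
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	if (!is_async(dev))
		return false;

	dev->power.work_in_progress = true;

	get_device(dev);

	if (async_schedule_dev_nocall(func, dev))
		return true;

	put_device(dev);

	/* Scheduling failed, so func() never runs: safe to clear the flag. */
	dev->power.work_in_progress = false;

	return false;
}

int main(void)
{
	struct device dev = { .power = { .async_enabled = true } };

	printf("scheduled=%d refcount=%d in_progress=%d\n",
	       dpm_async_fn(&dev, NULL), dev.refcount,
	       (int)dev.power.work_in_progress);
	return 0;
}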
@@ -729,8 +736,10 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	 * Trigger the resume of "async" devices upfront so they don't have to
 	 * wait for the "non-async" ones they don't depend on.
 	 */
-	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+		dpm_clear_async_state(dev);
 		dpm_async_fn(dev, async_resume_noirq);
+	}
 
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
@@ -869,8 +878,10 @@ void dpm_resume_early(pm_message_t state)
 	 * Trigger the resume of "async" devices upfront so they don't have to
 	 * wait for the "non-async" ones they don't depend on.
 	 */
-	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+		dpm_clear_async_state(dev);
 		dpm_async_fn(dev, async_resume_early);
+	}
 
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
@@ -1042,8 +1053,10 @@ void dpm_resume(pm_message_t state)
 	 * Trigger the resume of "async" devices upfront so they don't have to
 	 * wait for the "non-async" ones they don't depend on.
 	 */
-	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
+		dpm_clear_async_state(dev);
 		dpm_async_fn(dev, async_resume);
+	}
 
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
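The three resume loops above share one two-phase shape: first reset every device's async state and kick off the "async" ones, then walk the list in order, running "non-async" devices inline and waiting on the completions of the rest. Below is a rough userspace analogy using POSIX threads, with a toy completion standing in for the kernel's and purely illustrative names (async_resume, devs); build with -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void reinit_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = false;
	pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct device {
	const char *name;
	bool async;
	struct completion done;
	pthread_t thread;
};

static void *async_resume(void *data)
{
	struct device *dev = data;

	printf("async resume: %s\n", dev->name);
	complete(&dev->done);
	return NULL;
}

int main(void)
{
	struct device devs[] = {
		{ .name = "dev0", .async = true },
		{ .name = "dev1", .async = false },
		{ .name = "dev2", .async = true },
	};
	int i, n = 3;

	for (i = 0; i < n; i++) {
		pthread_mutex_init(&devs[i].done.lock, NULL);
		pthread_cond_init(&devs[i].done.cond, NULL);
	}

	/* Phase 1: clear per-device state, then kick off the async ones. */
	for (i = 0; i < n; i++) {
		reinit_completion(&devs[i].done);
		if (devs[i].async)
			pthread_create(&devs[i].thread, NULL, async_resume,
				       &devs[i]);
	}

	/* Phase 2: walk in order; sync devices run inline, async are waited for. */
	for (i = 0; i < n; i++) {
		if (devs[i].async) {
			wait_for_completion(&devs[i].done);
			pthread_join(devs[i].thread, NULL);
		} else {
			printf("sync resume: %s\n", devs[i].name);
		}
	}
	return 0;
}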
@@ -1320,6 +1333,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
 
 		list_move(&dev->power.entry, &dpm_noirq_list);
 
+		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend_noirq))
 			continue;
 
@@ -1497,6 +1511,7 @@ int dpm_suspend_late(pm_message_t state)
 
 		list_move(&dev->power.entry, &dpm_late_early_list);
 
+		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend_late))
 			continue;
 
@@ -1764,6 +1779,7 @@ int dpm_suspend(pm_message_t state)
 
 		list_move(&dev->power.entry, &dpm_suspended_list);
 
+		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend))
 			continue;
 