@@ -579,15 +579,15 @@ bool dev_pm_skip_resume(struct device *dev)
 }
 
 /**
- * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  *
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -674,35 +674,33 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 {
 	reinit_completion(&dev->power.completion);
 
-	if (!is_async(dev))
-		return false;
-
-	get_device(dev);
+	if (is_async(dev)) {
+		dev->power.async_in_progress = true;
 
-	if (async_schedule_dev_nocall(func, dev))
-		return true;
+		get_device(dev);
 
-	put_device(dev);
+		if (async_schedule_dev_nocall(func, dev))
+			return true;
 
+		put_device(dev);
+	}
+	/*
+	 * Because async_schedule_dev_nocall() above has returned false or it
+	 * has not been called at all, func() is not running and it is safe to
+	 * update the async_in_progress flag without extra synchronization.
+	 */
+	dev->power.async_in_progress = false;
 	return false;
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
 
-	__device_resume_noirq(dev, pm_transition, true);
+	device_resume_noirq(dev, pm_transition, true);
 	put_device(dev);
 }
 
-static void device_resume_noirq(struct device *dev)
-{
-	if (dpm_async_fn(dev, async_resume_noirq))
-		return;
-
-	__device_resume_noirq(dev, pm_transition, false);
-}
-
 static void dpm_noirq_resume_devices(pm_message_t state)
 {
 	struct device *dev;
@@ -712,18 +710,28 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
+	/*
+	 * Trigger the resume of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones they don't depend on.
+	 */
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+		dpm_async_fn(dev, async_resume_noirq);
+
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
-		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 
-		mutex_unlock(&dpm_list_mtx);
+		if (!dev->power.async_in_progress) {
+			get_device(dev);
 
-		device_resume_noirq(dev);
+			mutex_unlock(&dpm_list_mtx);
 
-		put_device(dev);
+			device_resume_noirq(dev, state, false);
 
-		mutex_lock(&dpm_list_mtx);
+			put_device(dev);
+
+			mutex_lock(&dpm_list_mtx);
+		}
 	}
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
@@ -747,14 +755,14 @@ void dpm_resume_noirq(pm_message_t state)
 }
 
 /**
- * __device_resume_early - Execute an "early resume" callback for given device.
+ * device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -820,18 +828,10 @@ static void async_resume_early(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
 
-	__device_resume_early(dev, pm_transition, true);
+	device_resume_early(dev, pm_transition, true);
 	put_device(dev);
 }
 
-static void device_resume_early(struct device *dev)
-{
-	if (dpm_async_fn(dev, async_resume_early))
-		return;
-
-	__device_resume_early(dev, pm_transition, false);
-}
-
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -845,18 +845,28 @@ void dpm_resume_early(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
+	/*
+	 * Trigger the resume of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones they don't depend on.
+	 */
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+		dpm_async_fn(dev, async_resume_early);
+
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
-		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 
-		mutex_unlock(&dpm_list_mtx);
+		if (!dev->power.async_in_progress) {
+			get_device(dev);
 
-		device_resume_early(dev);
+			mutex_unlock(&dpm_list_mtx);
 
-		put_device(dev);
+			device_resume_early(dev, state, false);
 
-		mutex_lock(&dpm_list_mtx);
+			put_device(dev);
+
+			mutex_lock(&dpm_list_mtx);
+		}
 	}
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
@@ -876,12 +886,12 @@ void dpm_resume_start(pm_message_t state)
 EXPORT_SYMBOL_GPL(dpm_resume_start);
 
 /**
- * __device_resume - Execute "resume" callbacks for given device.
+ * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  */
-static void __device_resume(struct device *dev, pm_message_t state, bool async)
+static void device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -975,18 +985,10 @@ static void async_resume(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
 
-	__device_resume(dev, pm_transition, true);
+	device_resume(dev, pm_transition, true);
 	put_device(dev);
 }
 
-static void device_resume(struct device *dev)
-{
-	if (dpm_async_fn(dev, async_resume))
-		return;
-
-	__device_resume(dev, pm_transition, false);
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -1006,16 +1008,25 @@ void dpm_resume(pm_message_t state)
 	pm_transition = state;
 	async_error = 0;
 
+	/*
+	 * Trigger the resume of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones they don't depend on.
+	 */
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+		dpm_async_fn(dev, async_resume);
+
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
 
 		get_device(dev);
 
-		mutex_unlock(&dpm_list_mtx);
+		if (!dev->power.async_in_progress) {
+			mutex_unlock(&dpm_list_mtx);
 
-		device_resume(dev);
+			device_resume(dev, state, false);
 
-		mutex_lock(&dpm_list_mtx);
+			mutex_lock(&dpm_list_mtx);
+		}
 
 		if (!list_empty(&dev->power.entry))
 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
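All of the resume-phase hunks above share the same two-pass shape: schedule the "async" devices upfront while marking them with power.async_in_progress, then walk the list and handle only the entries that are not already being resumed asynchronously, and finally wait for the async work. The following is a minimal user-space sketch of that shape, not kernel code and not part of this commit; the names (fake_device, resume_one, start_async) and the pthread plumbing are assumptions made purely for illustration.

/* Illustrative sketch only: mimics the "dispatch async upfront, then
 * handle the rest synchronously" pattern adopted by this commit. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NDEV 4

struct fake_device {
	const char *name;
	bool async_allowed;	/* stands in for is_async(dev) */
	bool async_in_progress;	/* stands in for dev->power.async_in_progress */
	pthread_t thread;
};

static void resume_one(struct fake_device *dev, bool async)
{
	printf("resuming %s (%s)\n", dev->name, async ? "async" : "sync");
}

static void *async_resume_thread(void *data)
{
	resume_one(data, true);
	return NULL;
}

/* Loosely analogous to dpm_async_fn(): try to start the resume asynchronously. */
static bool start_async(struct fake_device *dev)
{
	if (dev->async_allowed) {
		dev->async_in_progress = true;
		if (pthread_create(&dev->thread, NULL, async_resume_thread, dev) == 0)
			return true;
	}
	/* Nothing was started, so no async thread can be running: clearing
	 * the flag here needs no extra synchronization. */
	dev->async_in_progress = false;
	return false;
}

int main(void)
{
	struct fake_device devs[NDEV] = {
		{ "disk", true }, { "nic", true },
		{ "gpio", false }, { "rtc", false },
	};
	int i;

	/* Pass 1: kick off async resumes upfront, like the patched loops do. */
	for (i = 0; i < NDEV; i++)
		start_async(&devs[i]);

	/* Pass 2: resume the remaining entries synchronously, in list order. */
	for (i = 0; i < NDEV; i++)
		if (!devs[i].async_in_progress)
			resume_one(&devs[i], false);

	/* Loosely analogous to async_synchronize_full(). */
	for (i = 0; i < NDEV; i++)
		if (devs[i].async_in_progress)
			pthread_join(devs[i].thread, NULL);

	return 0;
}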