@@ -579,15 +579,15 @@ bool dev_pm_skip_resume(struct device *dev)
 }
 
 /**
- * device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  *
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -655,7 +655,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
 Out:
 	complete_all(&dev->power.completion);
 	TRACE_RESUME(error);
-	return error;
+
+	if (error) {
+		suspend_stats.failed_resume_noirq++;
+		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+	}
 }
 
 static bool is_async(struct device *dev)
@@ -668,27 +674,35 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 {
 	reinit_completion(&dev->power.completion);
 
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(func, dev);
+	if (!is_async(dev))
+		return false;
+
+	get_device(dev);
+
+	if (async_schedule_dev_nocall(func, dev))
 		return true;
-	}
+
+	put_device(dev);
 
 	return false;
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
-	int error;
-
-	error = device_resume_noirq(dev, pm_transition, true);
-	if (error)
-		pm_dev_err(dev, pm_transition, " async", error);
 
+	__device_resume_noirq(dev, pm_transition, true);
 	put_device(dev);
 }
 
+static void device_resume_noirq(struct device *dev)
+{
+	if (dpm_async_fn(dev, async_resume_noirq))
+		return;
+
+	__device_resume_noirq(dev, pm_transition, false);
+}
+
 static void dpm_noirq_resume_devices(pm_message_t state)
 {
 	struct device *dev;
@@ -698,32 +712,14 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
-	/*
-	 * Advanced the async threads upfront,
-	 * in case the starting of async threads is
-	 * delayed by non-async resuming devices.
-	 */
-	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
-		dpm_async_fn(dev, async_resume_noirq);
-
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 
 		mutex_unlock(&dpm_list_mtx);
 
-		if (!is_async(dev)) {
-			int error;
-
-			error = device_resume_noirq(dev, state, false);
-			if (error) {
-				suspend_stats.failed_resume_noirq++;
-				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-				dpm_save_failed_dev(dev_name(dev));
-				pm_dev_err(dev, state, " noirq", error);
-			}
-		}
+		device_resume_noirq(dev);
 
 		put_device(dev);
 
@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state)
 }
 
 /**
- * device_resume_early - Execute an "early resume" callback for given device.
+ * __device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  *
 * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -811,21 +807,31 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
 
 	pm_runtime_enable(dev);
 	complete_all(&dev->power.completion);
-	return error;
+
+	if (error) {
+		suspend_stats.failed_resume_early++;
+		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, state, async ? " async early" : " early", error);
+	}
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
-	int error;
-
-	error = device_resume_early(dev, pm_transition, true);
-	if (error)
-		pm_dev_err(dev, pm_transition, " async", error);
 
+	__device_resume_early(dev, pm_transition, true);
 	put_device(dev);
 }
 
+static void device_resume_early(struct device *dev)
+{
+	if (dpm_async_fn(dev, async_resume_early))
+		return;
+
+	__device_resume_early(dev, pm_transition, false);
+}
+
 /**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
@@ -839,32 +845,14 @@ void dpm_resume_early(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
-	/*
-	 * Advanced the async threads upfront,
-	 * in case the starting of async threads is
-	 * delayed by non-async resuming devices.
-	 */
-	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
-		dpm_async_fn(dev, async_resume_early);
-
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 
 		mutex_unlock(&dpm_list_mtx);
 
-		if (!is_async(dev)) {
-			int error;
-
-			error = device_resume_early(dev, state, false);
-			if (error) {
-				suspend_stats.failed_resume_early++;
-				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-				dpm_save_failed_dev(dev_name(dev));
-				pm_dev_err(dev, state, " early", error);
-			}
-		}
+		device_resume_early(dev);
 
 		put_device(dev);
 
@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state)
 EXPORT_SYMBOL_GPL(dpm_resume_start);
 
 /**
- * device_resume - Execute "resume" callbacks for given device.
+ * __device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  */
-static int device_resume(struct device *dev, pm_message_t state, bool async)
+static void __device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
 	TRACE_RESUME(error);
 
-	return error;
+	if (error) {
+		suspend_stats.failed_resume++;
+		dpm_save_failed_step(SUSPEND_RESUME);
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, state, async ? " async" : "", error);
+	}
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
-	int error;
 
-	error = device_resume(dev, pm_transition, true);
-	if (error)
-		pm_dev_err(dev, pm_transition, " async", error);
+	__device_resume(dev, pm_transition, true);
 	put_device(dev);
 }
 
+static void device_resume(struct device *dev)
+{
+	if (dpm_async_fn(dev, async_resume))
+		return;
+
+	__device_resume(dev, pm_transition, false);
+}
+
 /**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
 	pm_transition = state;
 	async_error = 0;
 
-	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
-		dpm_async_fn(dev, async_resume);
-
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
+
 		get_device(dev);
-		if (!is_async(dev)) {
-			int error;
 
-			mutex_unlock(&dpm_list_mtx);
+		mutex_unlock(&dpm_list_mtx);
+
+		device_resume(dev);
 
-			error = device_resume(dev, state, false);
-			if (error) {
-				suspend_stats.failed_resume++;
-				dpm_save_failed_step(SUSPEND_RESUME);
-				dpm_save_failed_dev(dev_name(dev));
-				pm_dev_err(dev, state, "", error);
-			}
+		mutex_lock(&dpm_list_mtx);
 
-			mutex_lock(&dpm_list_mtx);
-		}
 		if (!list_empty(&dev->power.entry))
 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
 
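Note on the control flow introduced above: unlike async_schedule_dev(), the async_schedule_dev_nocall() used in the reworked dpm_async_fn() does not invoke the callback itself when it cannot queue the work; it returns false, so dpm_async_fn() drops the reference it took and the new per-device wrappers (device_resume_noirq(), device_resume_early(), device_resume()) fall back to calling the __device_resume*() helper synchronously. A minimal userspace C sketch of this schedule-or-run-inline pattern, with pthreads standing in for the kernel's async machinery (try_schedule_async() and resume_work() are illustrative names, not kernel APIs):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the per-device resume work. */
    static void *resume_work(void *dev)
    {
            printf("resumed %s asynchronously\n", (const char *)dev);
            return NULL;
    }

    /*
     * Schedule-or-report: return true only if the work was really queued.
     * On failure, do NOT run the callback here; the caller keeps ownership
     * and runs it inline, mirroring dpm_async_fn() returning false after
     * put_device().
     */
    static bool try_schedule_async(void *dev)
    {
            pthread_t tid;

            if (pthread_create(&tid, NULL, resume_work, dev) != 0)
                    return false;

            pthread_detach(tid);
            return true;
    }

    int main(void)
    {
            char dev0[] = "dev0";

            if (!try_schedule_async(dev0))
                    printf("resumed %s synchronously\n", dev0);

            pthread_exit(NULL); /* let any detached worker finish */
    }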