Skip to content

Commit 3e99977

Browse files
committed
PM: sleep: Restore asynchronous device resume optimization
Before commit 7839d00 ("PM: sleep: Fix possible deadlocks in core system-wide PM code"), the resume of devices that were allowed to resume asynchronously was scheduled before starting the resume of the other devices, so the former did not have to wait for the latter unless functional dependencies were present.

Commit 7839d00 removed that optimization in order to address a correctness issue, but it can be restored with the help of a new device power management flag, so do that now.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
1 parent 7839d00 commit 3e99977

File tree

2 files changed

+65
-53
lines changed

2 files changed

+65
-53
lines changed

drivers/base/power/main.c

Lines changed: 64 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -579,15 +579,15 @@ bool dev_pm_skip_resume(struct device *dev)
579579
}
580580

581581
/**
582-
* __device_resume_noirq - Execute a "noirq resume" callback for given device.
582+
* device_resume_noirq - Execute a "noirq resume" callback for given device.
583583
* @dev: Device to handle.
584584
* @state: PM transition of the system being carried out.
585585
* @async: If true, the device is being resumed asynchronously.
586586
*
587587
* The driver of @dev will not receive interrupts while this function is being
588588
* executed.
589589
*/
590-
static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
590+
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
591591
{
592592
pm_callback_t callback = NULL;
593593
const char *info = NULL;
@@ -674,35 +674,33 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
674674
{
675675
reinit_completion(&dev->power.completion);
676676

677-
if (!is_async(dev))
678-
return false;
679-
680-
get_device(dev);
677+
if (is_async(dev)) {
678+
dev->power.async_in_progress = true;
681679

682-
if (async_schedule_dev_nocall(func, dev))
683-
return true;
680+
get_device(dev);
684681

685-
put_device(dev);
682+
if (async_schedule_dev_nocall(func, dev))
683+
return true;
686684

685+
put_device(dev);
686+
}
687+
/*
688+
* Because async_schedule_dev_nocall() above has returned false or it
689+
* has not been called at all, func() is not running and it is safe to
690+
* update the async_in_progress flag without extra synchronization.
691+
*/
692+
dev->power.async_in_progress = false;
687693
return false;
688694
}
689695

690696
static void async_resume_noirq(void *data, async_cookie_t cookie)
691697
{
692698
struct device *dev = data;
693699

694-
__device_resume_noirq(dev, pm_transition, true);
700+
device_resume_noirq(dev, pm_transition, true);
695701
put_device(dev);
696702
}
697703

698-
static void device_resume_noirq(struct device *dev)
699-
{
700-
if (dpm_async_fn(dev, async_resume_noirq))
701-
return;
702-
703-
__device_resume_noirq(dev, pm_transition, false);
704-
}
705-
706704
static void dpm_noirq_resume_devices(pm_message_t state)
707705
{
708706
struct device *dev;
@@ -712,18 +710,28 @@ static void dpm_noirq_resume_devices(pm_message_t state)
712710
mutex_lock(&dpm_list_mtx);
713711
pm_transition = state;
714712

713+
/*
714+
* Trigger the resume of "async" devices upfront so they don't have to
715+
* wait for the "non-async" ones they don't depend on.
716+
*/
717+
list_for_each_entry(dev, &dpm_noirq_list, power.entry)
718+
dpm_async_fn(dev, async_resume_noirq);
719+
715720
while (!list_empty(&dpm_noirq_list)) {
716721
dev = to_device(dpm_noirq_list.next);
717-
get_device(dev);
718722
list_move_tail(&dev->power.entry, &dpm_late_early_list);
719723

720-
mutex_unlock(&dpm_list_mtx);
724+
if (!dev->power.async_in_progress) {
725+
get_device(dev);
721726

722-
device_resume_noirq(dev);
727+
mutex_unlock(&dpm_list_mtx);
723728

724-
put_device(dev);
729+
device_resume_noirq(dev, state, false);
725730

726-
mutex_lock(&dpm_list_mtx);
731+
put_device(dev);
732+
733+
mutex_lock(&dpm_list_mtx);
734+
}
727735
}
728736
mutex_unlock(&dpm_list_mtx);
729737
async_synchronize_full();
@@ -747,14 +755,14 @@ void dpm_resume_noirq(pm_message_t state)
747755
}
748756

749757
/**
750-
* __device_resume_early - Execute an "early resume" callback for given device.
758+
* device_resume_early - Execute an "early resume" callback for given device.
751759
* @dev: Device to handle.
752760
* @state: PM transition of the system being carried out.
753761
* @async: If true, the device is being resumed asynchronously.
754762
*
755763
* Runtime PM is disabled for @dev while this function is being executed.
756764
*/
757-
static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
765+
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
758766
{
759767
pm_callback_t callback = NULL;
760768
const char *info = NULL;
@@ -820,18 +828,10 @@ static void async_resume_early(void *data, async_cookie_t cookie)
820828
{
821829
struct device *dev = data;
822830

823-
__device_resume_early(dev, pm_transition, true);
831+
device_resume_early(dev, pm_transition, true);
824832
put_device(dev);
825833
}
826834

827-
static void device_resume_early(struct device *dev)
828-
{
829-
if (dpm_async_fn(dev, async_resume_early))
830-
return;
831-
832-
__device_resume_early(dev, pm_transition, false);
833-
}
834-
835835
/**
836836
* dpm_resume_early - Execute "early resume" callbacks for all devices.
837837
* @state: PM transition of the system being carried out.
@@ -845,18 +845,28 @@ void dpm_resume_early(pm_message_t state)
845845
mutex_lock(&dpm_list_mtx);
846846
pm_transition = state;
847847

848+
/*
849+
* Trigger the resume of "async" devices upfront so they don't have to
850+
* wait for the "non-async" ones they don't depend on.
851+
*/
852+
list_for_each_entry(dev, &dpm_late_early_list, power.entry)
853+
dpm_async_fn(dev, async_resume_early);
854+
848855
while (!list_empty(&dpm_late_early_list)) {
849856
dev = to_device(dpm_late_early_list.next);
850-
get_device(dev);
851857
list_move_tail(&dev->power.entry, &dpm_suspended_list);
852858

853-
mutex_unlock(&dpm_list_mtx);
859+
if (!dev->power.async_in_progress) {
860+
get_device(dev);
854861

855-
device_resume_early(dev);
862+
mutex_unlock(&dpm_list_mtx);
856863

857-
put_device(dev);
864+
device_resume_early(dev, state, false);
858865

859-
mutex_lock(&dpm_list_mtx);
866+
put_device(dev);
867+
868+
mutex_lock(&dpm_list_mtx);
869+
}
860870
}
861871
mutex_unlock(&dpm_list_mtx);
862872
async_synchronize_full();
@@ -876,12 +886,12 @@ void dpm_resume_start(pm_message_t state)
876886
EXPORT_SYMBOL_GPL(dpm_resume_start);
877887

878888
/**
879-
* __device_resume - Execute "resume" callbacks for given device.
889+
* device_resume - Execute "resume" callbacks for given device.
880890
* @dev: Device to handle.
881891
* @state: PM transition of the system being carried out.
882892
* @async: If true, the device is being resumed asynchronously.
883893
*/
884-
static void __device_resume(struct device *dev, pm_message_t state, bool async)
894+
static void device_resume(struct device *dev, pm_message_t state, bool async)
885895
{
886896
pm_callback_t callback = NULL;
887897
const char *info = NULL;
@@ -975,18 +985,10 @@ static void async_resume(void *data, async_cookie_t cookie)
975985
{
976986
struct device *dev = data;
977987

978-
__device_resume(dev, pm_transition, true);
988+
device_resume(dev, pm_transition, true);
979989
put_device(dev);
980990
}
981991

982-
static void device_resume(struct device *dev)
983-
{
984-
if (dpm_async_fn(dev, async_resume))
985-
return;
986-
987-
__device_resume(dev, pm_transition, false);
988-
}
989-
990992
/**
991993
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
992994
* @state: PM transition of the system being carried out.
@@ -1006,16 +1008,25 @@ void dpm_resume(pm_message_t state)
10061008
pm_transition = state;
10071009
async_error = 0;
10081010

1011+
/*
1012+
* Trigger the resume of "async" devices upfront so they don't have to
1013+
* wait for the "non-async" ones they don't depend on.
1014+
*/
1015+
list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1016+
dpm_async_fn(dev, async_resume);
1017+
10091018
while (!list_empty(&dpm_suspended_list)) {
10101019
dev = to_device(dpm_suspended_list.next);
10111020

10121021
get_device(dev);
10131022

1014-
mutex_unlock(&dpm_list_mtx);
1023+
if (!dev->power.async_in_progress) {
1024+
mutex_unlock(&dpm_list_mtx);
10151025

1016-
device_resume(dev);
1026+
device_resume(dev, state, false);
10171027

1018-
mutex_lock(&dpm_list_mtx);
1028+
mutex_lock(&dpm_list_mtx);
1029+
}
10191030

10201031
if (!list_empty(&dev->power.entry))
10211032
list_move_tail(&dev->power.entry, &dpm_prepared_list);

include/linux/pm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -681,6 +681,7 @@ struct dev_pm_info {
681681
bool wakeup_path:1;
682682
bool syscore:1;
683683
bool no_pm_callbacks:1; /* Owned by the PM core */
684+
bool async_in_progress:1; /* Owned by the PM core */
684685
unsigned int must_resume:1; /* Owned by the PM core */
685686
unsigned int may_skip_resume:1; /* Set by subsystems */
686687
#else

0 commit comments

Comments (0)