/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
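
/*
 * async_error records the first failure reported by an asynchronous
 * suspend thread; dpm_suspend() folds it into its own return value after
 * async_synchronize_full(), since a failing async thread cannot hand the
 * error back to its caller directly.
 */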
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	/* Mark the completion as done, so early waiters do not block. */
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}
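
/*
 * Both helpers above key off the "initcall_debug" boot parameter, which
 * makes the PM core log and time every device callback.  The ">> 10" in
 * initcall_debug_report() is a cheap nanoseconds-to-microseconds
 * approximation (divide by 1024 instead of 1000), so the reported values
 * run about 2% low.
 */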
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
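
/*
 * The completion/wait pairs above provide the ordering for asynchronous
 * suspend and resume: a device waits for its parent on resume and for its
 * children on suspend.  As an illustration (not part of this file), a
 * driver opts a device into asynchronous handling with
 *
 *	device_enable_async_suspend(dev);
 *
 * after which the PM core may run the device's callbacks in an async
 * thread, still subject to dpm_wait()/dpm_wait_for_children().
 */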
static int dpm_run_callback(struct device *dev, int (*cb)(struct device *))
{
	ktime_t calltime;
	int error;

	/* Subsystems may leave callback slots empty; treat that as success. */
	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		error = dpm_run_callback(dev, ops->suspend);
		break;
	case PM_EVENT_RESUME:
		error = dpm_run_callback(dev, ops->resume);
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		error = dpm_run_callback(dev, ops->freeze);
		break;
	case PM_EVENT_HIBERNATE:
		error = dpm_run_callback(dev, ops->poweroff);
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		error = dpm_run_callback(dev, ops->thaw);
		break;
	case PM_EVENT_RESTORE:
		error = dpm_run_callback(dev, ops->restore);
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	return error;
}
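
/*
 * For illustration (not part of this file): the callbacks consumed above
 * normally come from a driver's struct dev_pm_ops, e.g.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * where foo_suspend() and foo_resume() are hypothetical driver functions.
 * SET_SYSTEM_SLEEP_PM_OPS() also routes the freeze/thaw/poweroff/restore
 * hibernation events to the same pair.
 */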
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		error = dpm_run_callback(dev, ops->suspend_noirq);
		break;
	case PM_EVENT_RESUME:
		error = dpm_run_callback(dev, ops->resume_noirq);
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		error = dpm_run_callback(dev, ops->freeze_noirq);
		break;
	case PM_EVENT_HIBERNATE:
		error = dpm_run_callback(dev, ops->poweroff_noirq);
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		error = dpm_run_callback(dev, ops->thaw_noirq);
		break;
	case PM_EVENT_RESTORE:
		error = dpm_run_callback(dev, ops->restore_noirq);
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	return error;
}
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
/*------------------------- Resume routines -------------------------*/
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}
/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
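
/*
 * The "noirq" phase above runs while device interrupt handlers are kept
 * disabled by suspend_device_irqs(); only lines marked IRQF_NO_SUSPEND
 * stay armed.  resume_device_irqs() re-enables the rest once every
 * ->resume_noirq() callback has run.
 */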
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = dpm_run_callback(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = dpm_run_callback(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
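
/*
 * Asynchronous handling is deliberately avoided while PM trace is active:
 * pm_trace attributes a resume hang to a device by relying on the strict
 * ordering of synchronous resume, which concurrent async threads would
 * obscure.
 */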
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
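
/*
 * Note the two passes above: the first walk only schedules async-capable
 * devices so their resume can proceed in parallel; the second walks
 * dpm_suspended_list in order, resuming synchronous devices and letting
 * device_resume() wait on each device's parent where ordering demands it.
 */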
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
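
/*
 * For orientation, a sketch of how the callers in kernel/power/ pair these
 * entry points during a suspend-to-RAM cycle (assuming no errors):
 *
 *	dpm_suspend_start(PMSG_SUSPEND);
 *	dpm_suspend_noirq(PMSG_SUSPEND);
 *	...the platform enters the sleep state and wakes up...
 *	dpm_resume_noirq(PMSG_RESUME);
 *	dpm_resume_end(PMSG_RESUME);
 *
 * so each device sees prepare/suspend/suspend_noirq on the way down and
 * resume_noirq/resume/complete on the way back up.
 */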
/*------------------------- Suspend routines -------------------------*/
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	return error;
}
/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	/* Abort the transition if a wakeup event is already pending. */
	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	if (!error) {
		dev->power.is_suspended = true;
		/* Propagate the wakeup path up to the parent. */
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
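
/*
 * Unlike the synchronous path, device_suspend() cannot return an async
 * failure directly (the async thread may still be running when it
 * returns), so __device_suspend() records failures in async_error and
 * dpm_suspend() re-checks it after async_synchronize_full().
 */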
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, NULL);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

	device_unlock(dev);

	return error;
}
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
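
/*
 * For illustration (not part of this file): a driver whose device must not
 * be suspended before some other device can call, from its own callback,
 *
 *	device_pm_wait_for_dev(dev, master_dev);
 *
 * where master_dev is a hypothetical pointer to the device whose
 * suspend/resume has to finish first.
 */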