/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/resume-trace.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/async.h>
30 #include <linux/suspend.h>
35 typedef int (*pm_callback_t)(struct device *);
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 *
 * NOTE(review): the extraction dropped a line here — upstream also
 * declares LIST_HEAD(dpm_list); confirm against the original file.
 */
/* Per-phase device lists; all protected by dpm_list_mtx. */
48 LIST_HEAD(dpm_prepared_list);
49 LIST_HEAD(dpm_suspended_list);
50 LIST_HEAD(dpm_noirq_list);
/* Global suspend/resume failure statistics exported to sysfs elsewhere. */
52 struct suspend_stats suspend_stats;
53 static DEFINE_MUTEX(dpm_list_mtx);
/* PM message of the transition currently in progress (set under dpm_list_mtx). */
54 static pm_message_t pm_transition;
/* First error reported by an async suspend thread, if any. */
56 static int async_error;
59 * device_pm_init - Initialize the PM-related part of a device object.
60 * @dev: Device object being initialized.
62 void device_pm_init(struct device *dev)
64 dev->power.is_prepared = false;
65 dev->power.is_suspended = false;
66 init_completion(&dev->power.completion);
67 complete_all(&dev->power.completion);
68 dev->power.wakeup = NULL;
69 spin_lock_init(&dev->power.lock);
71 INIT_LIST_HEAD(&dev->power.entry);
72 dev->power.power_state = PMSG_INVALID;
76 * device_pm_lock - Lock the list of active devices used by the PM core.
78 void device_pm_lock(void)
80 mutex_lock(&dpm_list_mtx);
84 * device_pm_unlock - Unlock the list of active devices used by the PM core.
86 void device_pm_unlock(void)
88 mutex_unlock(&dpm_list_mtx);
92 * device_pm_add - Add a device to the PM core's list of active devices.
93 * @dev: Device to add to the list.
95 void device_pm_add(struct device *dev)
97 pr_debug("PM: Adding info for %s:%s\n",
98 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
99 mutex_lock(&dpm_list_mtx);
100 if (dev->parent && dev->parent->power.is_prepared)
101 dev_warn(dev, "parent %s should not be sleeping\n",
102 dev_name(dev->parent));
103 list_add_tail(&dev->power.entry, &dpm_list);
104 dev_pm_qos_constraints_init(dev);
105 mutex_unlock(&dpm_list_mtx);
109 * device_pm_remove - Remove a device from the PM core's list of active devices.
110 * @dev: Device to be removed from the list.
112 void device_pm_remove(struct device *dev)
114 pr_debug("PM: Removing info for %s:%s\n",
115 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
116 complete_all(&dev->power.completion);
117 mutex_lock(&dpm_list_mtx);
118 dev_pm_qos_constraints_destroy(dev);
119 list_del_init(&dev->power.entry);
120 mutex_unlock(&dpm_list_mtx);
121 device_wakeup_disable(dev);
122 pm_runtime_remove(dev);
126 * device_pm_move_before - Move device in the PM core's list of active devices.
127 * @deva: Device to move in dpm_list.
128 * @devb: Device @deva should come before.
130 void device_pm_move_before(struct device *deva, struct device *devb)
132 pr_debug("PM: Moving %s:%s before %s:%s\n",
133 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
134 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
135 /* Delete deva from dpm_list and reinsert before devb. */
136 list_move_tail(&deva->power.entry, &devb->power.entry);
140 * device_pm_move_after - Move device in the PM core's list of active devices.
141 * @deva: Device to move in dpm_list.
142 * @devb: Device @deva should come after.
144 void device_pm_move_after(struct device *deva, struct device *devb)
146 pr_debug("PM: Moving %s:%s after %s:%s\n",
147 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
148 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
149 /* Delete deva from dpm_list and reinsert after devb. */
150 list_move(&deva->power.entry, &devb->power.entry);
154 * device_pm_move_last - Move device to end of the PM core's list of devices.
155 * @dev: Device to move in dpm_list.
157 void device_pm_move_last(struct device *dev)
159 pr_debug("PM: Moving %s:%s to end of list\n",
160 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
161 list_move_tail(&dev->power.entry, &dpm_list);
164 static ktime_t initcall_debug_start(struct device *dev)
166 ktime_t calltime = ktime_set(0, 0);
168 if (initcall_debug) {
169 pr_info("calling %s+ @ %i, parent: %s\n",
170 dev_name(dev), task_pid_nr(current),
171 dev->parent ? dev_name(dev->parent) : "none");
172 calltime = ktime_get();
178 static void initcall_debug_report(struct device *dev, ktime_t calltime,
181 ktime_t delta, rettime;
183 if (initcall_debug) {
184 rettime = ktime_get();
185 delta = ktime_sub(rettime, calltime);
186 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
187 error, (unsigned long long)ktime_to_ns(delta) >> 10);
192 * dpm_wait - Wait for a PM operation to complete.
193 * @dev: Device to wait for.
194 * @async: If unset, wait only if the device's power.async_suspend flag is set.
196 static void dpm_wait(struct device *dev, bool async)
201 if (async || (pm_async_enabled && dev->power.async_suspend))
202 wait_for_completion(&dev->power.completion);
205 static int dpm_wait_fn(struct device *dev, void *async_ptr)
207 dpm_wait(dev, *((bool *)async_ptr));
211 static void dpm_wait_for_children(struct device *dev, bool async)
213 device_for_each_child(dev, &async, dpm_wait_fn);
217 * pm_op - Return the PM operation appropriate for given PM event.
218 * @ops: PM operations to choose from.
219 * @state: PM transition of the system being carried out.
221 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
223 switch (state.event) {
224 #ifdef CONFIG_SUSPEND
225 case PM_EVENT_SUSPEND:
227 case PM_EVENT_RESUME:
229 #endif /* CONFIG_SUSPEND */
230 #ifdef CONFIG_HIBERNATE_CALLBACKS
231 case PM_EVENT_FREEZE:
232 case PM_EVENT_QUIESCE:
234 case PM_EVENT_HIBERNATE:
235 return ops->poweroff;
237 case PM_EVENT_RECOVER:
240 case PM_EVENT_RESTORE:
242 #endif /* CONFIG_HIBERNATE_CALLBACKS */
249 * pm_noirq_op - Return the PM operation appropriate for given PM event.
250 * @ops: PM operations to choose from.
251 * @state: PM transition of the system being carried out.
253 * The driver of @dev will not receive interrupts while this function is being
256 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
258 switch (state.event) {
259 #ifdef CONFIG_SUSPEND
260 case PM_EVENT_SUSPEND:
261 return ops->suspend_noirq;
262 case PM_EVENT_RESUME:
263 return ops->resume_noirq;
264 #endif /* CONFIG_SUSPEND */
265 #ifdef CONFIG_HIBERNATE_CALLBACKS
266 case PM_EVENT_FREEZE:
267 case PM_EVENT_QUIESCE:
268 return ops->freeze_noirq;
269 case PM_EVENT_HIBERNATE:
270 return ops->poweroff_noirq;
272 case PM_EVENT_RECOVER:
273 return ops->thaw_noirq;
274 case PM_EVENT_RESTORE:
275 return ops->restore_noirq;
276 #endif /* CONFIG_HIBERNATE_CALLBACKS */
282 static char *pm_verb(int event)
285 case PM_EVENT_SUSPEND:
287 case PM_EVENT_RESUME:
289 case PM_EVENT_FREEZE:
291 case PM_EVENT_QUIESCE:
293 case PM_EVENT_HIBERNATE:
297 case PM_EVENT_RESTORE:
299 case PM_EVENT_RECOVER:
302 return "(unknown PM event)";
306 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
308 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
309 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
310 ", may wakeup" : "");
313 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
316 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
317 dev_name(dev), pm_verb(state.event), info, error);
320 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
326 calltime = ktime_get();
327 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
328 do_div(usecs64, NSEC_PER_USEC);
332 pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
333 info ?: "", info ? " " : "", pm_verb(state.event),
334 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
337 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
338 pm_message_t state, char *info)
346 calltime = initcall_debug_start(dev);
348 pm_dev_dbg(dev, state, info);
350 suspend_report_result(cb, error);
352 initcall_debug_report(dev, calltime, error);
357 /*------------------------- Resume routines -------------------------*/
360 * device_resume_noirq - Execute an "early resume" callback for given device.
361 * @dev: Device to handle.
362 * @state: PM transition of the system being carried out.
364 * The driver of @dev will not receive interrupts while this function is being
367 static int device_resume_noirq(struct device *dev, pm_message_t state)
369 pm_callback_t callback = NULL;
376 if (dev->pm_domain) {
377 info = "EARLY power domain ";
378 callback = pm_noirq_op(&dev->pm_domain->ops, state);
379 } else if (dev->type && dev->type->pm) {
380 info = "EARLY type ";
381 callback = pm_noirq_op(dev->type->pm, state);
382 } else if (dev->class && dev->class->pm) {
383 info = "EARLY class ";
384 callback = pm_noirq_op(dev->class->pm, state);
385 } else if (dev->bus && dev->bus->pm) {
387 callback = pm_noirq_op(dev->bus->pm, state);
390 error = dpm_run_callback(callback, dev, state, info);
397 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
398 * @state: PM transition of the system being carried out.
400 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
401 * enable device drivers to receive interrupts.
403 void dpm_resume_noirq(pm_message_t state)
405 ktime_t starttime = ktime_get();
407 mutex_lock(&dpm_list_mtx);
408 while (!list_empty(&dpm_noirq_list)) {
409 struct device *dev = to_device(dpm_noirq_list.next);
413 list_move_tail(&dev->power.entry, &dpm_suspended_list);
414 mutex_unlock(&dpm_list_mtx);
416 error = device_resume_noirq(dev, state);
418 suspend_stats.failed_resume_noirq++;
419 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
420 dpm_save_failed_dev(dev_name(dev));
421 pm_dev_err(dev, state, " early", error);
424 mutex_lock(&dpm_list_mtx);
427 mutex_unlock(&dpm_list_mtx);
428 dpm_show_time(starttime, state, "early");
429 resume_device_irqs();
431 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * NOTE(review): this block is a garbled extraction — braces, goto labels
 * and several statements were dropped between the numbered lines below;
 * reconcile against the upstream file before building.
 */
439 static int device_resume(struct device *dev, pm_message_t state, bool async)
441 pm_callback_t callback = NULL;
/* Parent must finish resuming before this device starts. */
449 dpm_wait(dev->parent, async);
/*
 * This is a fib. But we'll allow new children to be added below
 * a resumed device, even if the device hasn't been completed yet.
 */
456 dev->power.is_prepared = false;
458 if (!dev->power.is_suspended)
461 pm_runtime_enable(dev);
/* Callback precedence: power domain > type > class > bus; legacy last. */
464 if (dev->pm_domain) {
465 info = "power domain ";
466 callback = pm_op(&dev->pm_domain->ops, state);
470 if (dev->type && dev->type->pm) {
472 callback = pm_op(dev->type->pm, state);
477 if (dev->class->pm) {
479 callback = pm_op(dev->class->pm, state);
481 } else if (dev->class->resume) {
482 info = "legacy class ";
483 callback = dev->class->resume;
491 callback = pm_op(dev->bus->pm, state);
492 } else if (dev->bus->resume) {
494 callback = dev->bus->resume;
499 error = dpm_run_callback(callback, dev, state, info);
500 dev->power.is_suspended = false;
/* Wake anyone blocked in dpm_wait() on this device. */
504 complete_all(&dev->power.completion);
/* Drop the runtime-PM reference taken during suspend. */
509 pm_runtime_put_sync(dev);
514 static void async_resume(void *data, async_cookie_t cookie)
516 struct device *dev = (struct device *)data;
519 error = device_resume(dev, pm_transition, true);
521 pm_dev_err(dev, pm_transition, " async", error);
525 static bool is_async(struct device *dev)
527 return dev->power.async_suspend && pm_async_enabled
528 && !pm_trace_is_enabled();
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 *
 * NOTE(review): garbled extraction — braces, local declarations and
 * get_device()/put_device() pairs are missing between the numbered
 * lines; reconcile against the upstream file before building.
 */
538 void dpm_resume(pm_message_t state)
541 ktime_t starttime = ktime_get();
545 mutex_lock(&dpm_list_mtx);
546 pm_transition = state;
/* First pass: kick off async resumes for devices that support them. */
549 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
550 INIT_COMPLETION(dev->power.completion);
553 async_schedule(async_resume, dev);
/* Second pass: resume the remaining devices synchronously, in order. */
557 while (!list_empty(&dpm_suspended_list)) {
558 dev = to_device(dpm_suspended_list.next);
560 if (!is_async(dev)) {
563 mutex_unlock(&dpm_list_mtx);
565 error = device_resume(dev, state, false);
567 suspend_stats.failed_resume++;
568 dpm_save_failed_step(SUSPEND_RESUME);
569 dpm_save_failed_dev(dev_name(dev));
570 pm_dev_err(dev, state, "", error);
573 mutex_lock(&dpm_list_mtx);
575 if (!list_empty(&dev->power.entry))
576 list_move_tail(&dev->power.entry, &dpm_prepared_list);
579 mutex_unlock(&dpm_list_mtx);
/* Wait for all async resumes started above to finish. */
580 async_synchronize_full();
581 dpm_show_time(starttime, state, NULL);
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * NOTE(review): garbled extraction — upstream brackets this chain with
 * device_lock(dev)/device_unlock(dev); those lines were dropped here.
 */
589 static void device_complete(struct device *dev, pm_message_t state)
/* Callback precedence: power domain > type > class > bus. */
593 if (dev->pm_domain) {
594 pm_dev_dbg(dev, state, "completing power domain ");
595 if (dev->pm_domain->ops.complete)
596 dev->pm_domain->ops.complete(dev);
597 } else if (dev->type && dev->type->pm) {
598 pm_dev_dbg(dev, state, "completing type ");
599 if (dev->type->pm->complete)
600 dev->type->pm->complete(dev);
601 } else if (dev->class && dev->class->pm) {
602 pm_dev_dbg(dev, state, "completing class ");
603 if (dev->class->pm->complete)
604 dev->class->pm->complete(dev);
605 } else if (dev->bus && dev->bus->pm) {
606 pm_dev_dbg(dev, state, "completing ");
607 if (dev->bus->pm->complete)
608 dev->bus->pm->complete(dev);
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 *
 * NOTE(review): garbled extraction — braces and get_device()/put_device()
 * pairs around the callback are missing between the numbered lines.
 */
621 void dpm_complete(pm_message_t state)
623 struct list_head list;
627 INIT_LIST_HEAD(&list);
628 mutex_lock(&dpm_list_mtx);
/* Walk dpm_prepared_list in reverse registration order. */
629 while (!list_empty(&dpm_prepared_list)) {
630 struct device *dev = to_device(dpm_prepared_list.prev);
633 dev->power.is_prepared = false;
/* Park completed devices on a private list; spliced back below. */
634 list_move(&dev->power.entry, &list);
635 mutex_unlock(&dpm_list_mtx);
637 device_complete(dev, state);
639 mutex_lock(&dpm_list_mtx);
642 list_splice(&list, &dpm_list);
643 mutex_unlock(&dpm_list_mtx);
647 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
648 * @state: PM transition of the system being carried out.
650 * Execute "resume" callbacks for all devices and complete the PM transition of
653 void dpm_resume_end(pm_message_t state)
658 EXPORT_SYMBOL_GPL(dpm_resume_end);
661 /*------------------------- Suspend routines -------------------------*/
664 * resume_event - Return a "resume" message for given "suspend" sleep state.
665 * @sleep_state: PM message representing a sleep state.
667 * Return a PM message representing the resume event corresponding to given
670 static pm_message_t resume_event(pm_message_t sleep_state)
672 switch (sleep_state.event) {
673 case PM_EVENT_SUSPEND:
675 case PM_EVENT_FREEZE:
676 case PM_EVENT_QUIESCE:
678 case PM_EVENT_HIBERNATE:
685 * device_suspend_noirq - Execute a "late suspend" callback for given device.
686 * @dev: Device to handle.
687 * @state: PM transition of the system being carried out.
689 * The driver of @dev will not receive interrupts while this function is being
692 static int device_suspend_noirq(struct device *dev, pm_message_t state)
694 pm_callback_t callback = NULL;
697 if (dev->pm_domain) {
698 info = "LATE power domain ";
699 callback = pm_noirq_op(&dev->pm_domain->ops, state);
700 } else if (dev->type && dev->type->pm) {
702 callback = pm_noirq_op(dev->type->pm, state);
703 } else if (dev->class && dev->class->pm) {
704 info = "LATE class ";
705 callback = pm_noirq_op(dev->class->pm, state);
706 } else if (dev->bus && dev->bus->pm) {
708 callback = pm_noirq_op(dev->bus->pm, state);
711 return dpm_run_callback(callback, dev, state, info);
715 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
716 * @state: PM transition of the system being carried out.
718 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
719 * handlers for all non-sysdev devices.
721 int dpm_suspend_noirq(pm_message_t state)
723 ktime_t starttime = ktime_get();
726 suspend_device_irqs();
727 mutex_lock(&dpm_list_mtx);
728 while (!list_empty(&dpm_suspended_list)) {
729 struct device *dev = to_device(dpm_suspended_list.prev);
732 mutex_unlock(&dpm_list_mtx);
734 error = device_suspend_noirq(dev, state);
736 mutex_lock(&dpm_list_mtx);
738 pm_dev_err(dev, state, " late", error);
739 suspend_stats.failed_suspend_noirq++;
740 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
741 dpm_save_failed_dev(dev_name(dev));
745 if (!list_empty(&dev->power.entry))
746 list_move(&dev->power.entry, &dpm_noirq_list);
749 mutex_unlock(&dpm_list_mtx);
751 dpm_resume_noirq(resume_event(state));
753 dpm_show_time(starttime, state, "late");
756 EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
759 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
760 * @dev: Device to suspend.
761 * @state: PM transition of the system being carried out.
762 * @cb: Suspend callback to execute.
764 static int legacy_suspend(struct device *dev, pm_message_t state,
765 int (*cb)(struct device *dev, pm_message_t state))
770 calltime = initcall_debug_start(dev);
772 error = cb(dev, state);
773 suspend_report_result(cb, error);
775 initcall_debug_report(dev, calltime, error);
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * NOTE(review): garbled extraction — braces, goto labels (Complete/End),
 * local declarations and the final return were dropped between the
 * numbered lines; reconcile against the upstream file before building.
 */
786 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
788 pm_callback_t callback = NULL;
/* All children must be suspended before the parent. */
792 dpm_wait_for_children(dev, async);
/* Block runtime suspend/resume while the system transition runs. */
797 pm_runtime_get_noresume(dev);
798 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
799 pm_wakeup_event(dev, 0);
/* Abort the whole suspend if a wakeup event is already pending. */
801 if (pm_wakeup_pending()) {
802 pm_runtime_put_sync(dev);
803 async_error = -EBUSY;
/* Callback precedence: power domain > type > class > bus; legacy last. */
809 if (dev->pm_domain) {
810 info = "power domain ";
811 callback = pm_op(&dev->pm_domain->ops, state);
815 if (dev->type && dev->type->pm) {
817 callback = pm_op(dev->type->pm, state);
822 if (dev->class->pm) {
824 callback = pm_op(dev->class->pm, state);
826 } else if (dev->class->suspend) {
827 pm_dev_dbg(dev, state, "legacy class ");
828 error = legacy_suspend(dev, state, dev->class->suspend);
836 callback = pm_op(dev->bus->pm, state);
837 } else if (dev->bus->suspend) {
838 pm_dev_dbg(dev, state, "legacy ");
839 error = legacy_suspend(dev, state, dev->bus->suspend);
845 error = dpm_run_callback(callback, dev, state, info);
849 dev->power.is_suspended = true;
/* Propagate wakeup-path status to the parent unless it ignores children. */
850 if (dev->power.wakeup_path
851 && dev->parent && !dev->parent->power.ignore_children)
852 dev->parent->power.wakeup_path = true;
/* Wake anyone blocked in dpm_wait() on this device. */
856 complete_all(&dev->power.completion);
859 pm_runtime_put_sync(dev);
861 } else if (dev->power.is_suspended) {
862 __pm_runtime_disable(dev, false);
868 static void async_suspend(void *data, async_cookie_t cookie)
870 struct device *dev = (struct device *)data;
873 error = __device_suspend(dev, pm_transition, true);
875 dpm_save_failed_dev(dev_name(dev));
876 pm_dev_err(dev, pm_transition, " async", error);
882 static int device_suspend(struct device *dev)
884 INIT_COMPLETION(dev->power.completion);
886 if (pm_async_enabled && dev->power.async_suspend) {
888 async_schedule(async_suspend, dev);
892 return __device_suspend(dev, pm_transition, false);
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * NOTE(review): garbled extraction — braces, the error declaration,
 * get_device()/put_device() pairs, the async_error checks and the final
 * return were dropped between the numbered lines below.
 */
899 int dpm_suspend(pm_message_t state)
901 ktime_t starttime = ktime_get();
906 mutex_lock(&dpm_list_mtx);
907 pm_transition = state;
/* Suspend in reverse registration order: children before parents. */
909 while (!list_empty(&dpm_prepared_list)) {
910 struct device *dev = to_device(dpm_prepared_list.prev);
913 mutex_unlock(&dpm_list_mtx);
915 error = device_suspend(dev);
917 mutex_lock(&dpm_list_mtx);
919 pm_dev_err(dev, state, "", error);
920 dpm_save_failed_dev(dev_name(dev));
924 if (!list_empty(&dev->power.entry))
925 list_move(&dev->power.entry, &dpm_suspended_list);
930 mutex_unlock(&dpm_list_mtx);
/* Collect results of all async suspends started above. */
931 async_synchronize_full();
935 suspend_stats.failed_suspend++;
936 dpm_save_failed_step(SUSPEND_SUSPEND);
938 dpm_show_time(starttime, state, NULL);
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 *
 * NOTE(review): garbled extraction — upstream brackets this chain with
 * device_lock()/device_unlock(), declares "int error = 0;" and returns
 * it; those lines were dropped between the numbered lines below.
 */
950 static int device_prepare(struct device *dev, pm_message_t state)
/* Seed the wakeup path flag; propagated to parents during suspend. */
956 dev->power.wakeup_path = device_may_wakeup(dev);
/* Callback precedence: power domain > type > class > bus. */
958 if (dev->pm_domain) {
959 pm_dev_dbg(dev, state, "preparing power domain ");
960 if (dev->pm_domain->ops.prepare)
961 error = dev->pm_domain->ops.prepare(dev);
962 suspend_report_result(dev->pm_domain->ops.prepare, error);
963 } else if (dev->type && dev->type->pm) {
964 pm_dev_dbg(dev, state, "preparing type ");
965 if (dev->type->pm->prepare)
966 error = dev->type->pm->prepare(dev);
967 suspend_report_result(dev->type->pm->prepare, error);
968 } else if (dev->class && dev->class->pm) {
969 pm_dev_dbg(dev, state, "preparing class ");
970 if (dev->class->pm->prepare)
971 error = dev->class->pm->prepare(dev);
972 suspend_report_result(dev->class->pm->prepare, error);
973 } else if (dev->bus && dev->bus->pm) {
974 pm_dev_dbg(dev, state, "preparing ");
975 if (dev->bus->pm->prepare)
976 error = dev->bus->pm->prepare(dev);
977 suspend_report_result(dev->bus->pm->prepare, error);
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 *
 * NOTE(review): garbled extraction — braces, the error declaration,
 * get_device()/put_device() pairs and the final return were dropped
 * between the numbered lines below.
 */
991 int dpm_prepare(pm_message_t state)
997 mutex_lock(&dpm_list_mtx);
/* Prepare in registration order: parents before children. */
998 while (!list_empty(&dpm_list)) {
999 struct device *dev = to_device(dpm_list.next);
1002 mutex_unlock(&dpm_list_mtx);
1004 error = device_prepare(dev, state);
1006 mutex_lock(&dpm_list_mtx);
/* -EAGAIN: the device asked to be skipped, not to abort the transition. */
1008 if (error == -EAGAIN) {
1013 printk(KERN_INFO "PM: Device %s not prepared "
1014 "for power transition: code %d\n",
1015 dev_name(dev), error);
1019 dev->power.is_prepared = true;
1020 if (!list_empty(&dev->power.entry))
1021 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1024 mutex_unlock(&dpm_list_mtx);
1029 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1030 * @state: PM transition of the system being carried out.
1032 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1033 * callbacks for them.
1035 int dpm_suspend_start(pm_message_t state)
1039 error = dpm_prepare(state);
1041 suspend_stats.failed_prepare++;
1042 dpm_save_failed_step(SUSPEND_PREPARE);
1044 error = dpm_suspend(state);
1047 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1049 void __suspend_report_result(const char *function, void *fn, int ret)
1052 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1054 EXPORT_SYMBOL_GPL(__suspend_report_result);
1057 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1058 * @dev: Device to wait for.
1059 * @subordinate: Device that needs to wait for @dev.
1061 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1063 dpm_wait(dev, subordinate->power.async_suspend);
1066 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);