/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
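
/*
 * Illustration (a sketch with placeholder names, not kernel API): the rule
 * above forces every walker of these lists in this file into the same
 * shape - pin the device with get_device(), drop dpm_list_mtx before
 * running any callback that might take the device lock, then re-acquire
 * the mutex and drop the reference:
 *
 *	mutex_lock(&dpm_list_mtx);
 *	while (!list_empty(&some_list)) {
 *		struct device *dev = to_device(some_list.next);
 *
 *		get_device(dev);
 *		mutex_unlock(&dpm_list_mtx);
 *
 *		some_callback(dev);	   (may take the device lock)
 *
 *		mutex_lock(&dpm_list_mtx);
 *		put_device(dev);
 *	}
 *	mutex_unlock(&dpm_list_mtx);
 */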

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
        dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

static int dpm_run_callback(struct device *dev, int (*cb)(struct device *))
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
                 const struct dev_pm_ops *ops,
                 pm_message_t state)
{
        int error = 0;

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                error = dpm_run_callback(dev, ops->suspend);
                break;
        case PM_EVENT_RESUME:
                error = dpm_run_callback(dev, ops->resume);
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                error = dpm_run_callback(dev, ops->freeze);
                break;
        case PM_EVENT_HIBERNATE:
                error = dpm_run_callback(dev, ops->poweroff);
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                error = dpm_run_callback(dev, ops->thaw);
                break;
        case PM_EVENT_RESTORE:
                error = dpm_run_callback(dev, ops->restore);
                break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }

        return error;
}
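
/*
 * For reference, a sketch (hypothetical "foo" driver, not part of this
 * file) of a struct dev_pm_ops that pm_op() and pm_noirq_op() dispatch
 * into. SIMPLE_DEV_PM_OPS() points the hibernation entries (freeze, thaw,
 * poweroff, restore) at the same two callbacks:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	   (quiesce the hardware here)
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	   (re-initialize the hardware here)
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */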

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
                        const struct dev_pm_ops *ops,
                        pm_message_t state)
{
        int error = 0;

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                error = dpm_run_callback(dev, ops->suspend_noirq);
                break;
        case PM_EVENT_RESUME:
                error = dpm_run_callback(dev, ops->resume_noirq);
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                error = dpm_run_callback(dev, ops->freeze_noirq);
                break;
        case PM_EVENT_HIBERNATE:
                error = dpm_run_callback(dev, ops->poweroff_noirq);
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                error = dpm_run_callback(dev, ops->thaw_noirq);
                break;
        case PM_EVENT_RESTORE:
                error = dpm_run_callback(dev, ops->restore_noirq);
                break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }

        return error;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->pm_domain) {
                pm_dev_dbg(dev, state, "EARLY power domain ");
                error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "EARLY type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "EARLY class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "EARLY ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        }

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;
        bool put = false;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        pm_runtime_enable(dev);
        put = true;

        if (dev->pm_domain) {
                pm_dev_dbg(dev, state, "power domain ");
                error = pm_op(dev, &dev->pm_domain->ops, state);
                goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "type ");
                error = pm_op(dev, dev->type->pm, state);
                goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                        goto End;
                } else if (dev->class->resume) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = dpm_run_callback(dev, dev->class->resume);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = dpm_run_callback(dev, dev->bus->resume);
                }
        }

 End:
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        if (put)
                pm_runtime_put_sync(dev);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}
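
/*
 * Devices take the async path above only if power.async_suspend is set;
 * a driver opts in with device_enable_async_suspend(), typically at probe
 * time, once it knows the device's only PM ordering constraints are its
 * parent and children. A sketch (hypothetical "foo" driver):
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		device_enable_async_suspend(dev);
 *		return 0;
 *	}
 */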

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        device_lock(dev);

        if (dev->pm_domain) {
                pm_dev_dbg(dev, state, "completing power domain ");
                if (dev->pm_domain->ops.complete)
                        dev->pm_domain->ops.complete(dev);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "completing type ");
                if (dev->type->pm->complete)
                        dev->type->pm->complete(dev);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "completing class ");
                if (dev->class->pm->complete)
                        dev->class->pm->complete(dev);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "completing ");
                if (dev->bus->pm->complete)
                        dev->bus->pm->complete(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        if (dev->pm_domain) {
                pm_dev_dbg(dev, state, "LATE power domain ");
                error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "LATE type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "LATE class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "LATE ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        }

        return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                return 0;

        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                return 0;
        }

        device_lock(dev);

        if (dev->pm_domain) {
                pm_dev_dbg(dev, state, "power domain ");
                error = pm_op(dev, &dev->pm_domain->ops, state);
                goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "type ");
                error = pm_op(dev, dev->type->pm, state);
                goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                        goto End;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                }
        }

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);
        complete_all(&dev->power.completion);

        if (error) {
                pm_runtime_put_sync(dev);
                async_error = error;
        } else if (dev->power.is_suspended) {
                __pm_runtime_disable(dev, false);
        }

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int error = 0;

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                pm_dev_dbg(dev, state, "preparing power domain ");
                if (dev->pm_domain->ops.prepare)
                        error = dev->pm_domain->ops.prepare(dev);
                suspend_report_result(dev->pm_domain->ops.prepare, error);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "preparing type ");
                if (dev->type->pm->prepare)
                        error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "preparing class ");
                if (dev->class->pm->prepare)
                        error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "preparing ");
                if (dev->bus->pm->prepare)
                        error = dev->bus->pm->prepare(dev);
                suspend_report_result(dev->bus->pm->prepare, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}
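
/*
 * As the -EAGAIN branch above shows, a ->prepare() callback may ask the
 * PM core to retry the device instead of aborting the whole transition.
 * A sketch (hypothetical subsystem; foo_device_still_probing() is a
 * made-up helper):
 *
 *	static int foo_bus_prepare(struct device *dev)
 *	{
 *		if (foo_device_still_probing(dev))
 *			return -EAGAIN;
 *		return 0;
 *	}
 */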

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
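
/*
 * A sketch of what device_pm_wait_for_dev() is for ("foo", priv->companion
 * and foo_hw_reinit() are hypothetical): an async device whose resume
 * depends on a device that is not one of its ancestors must wait for it
 * explicitly, because the PM core only orders parents against children:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *		int error = device_pm_wait_for_dev(dev, priv->companion);
 *
 *		if (error)
 *			return error;
 *		return foo_hw_reinit(priv);
 *	}
 */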