/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

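/*
 * Example: every system sleep callback in struct dev_pm_ops has this
 * signature, so any of them can be stored in a pm_callback_t.  A minimal
 * sketch of a matching callback (the foo_* names are hypothetical):
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *              return foo_save_state(priv);
 *      }
 */
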
/*
 * The entries in dpm_list are kept in depth-first order, simply because
 * children are guaranteed to be discovered after their parents and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

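/*
 * Concretely, the rule above avoids an ABBA deadlock of the form (sketch):
 *
 *      CPU0                                 CPU1 (holds a device lock and
 *                                                 calls device_pm_add())
 *      mutex_lock(&dpm_list_mtx);           device_lock(dev);
 *      device_lock(dev);                    mutex_lock(&dpm_list_mtx);
 *
 * which is why the list walks below always drop dpm_list_mtx before
 * calling back into a driver.
 */
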
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
        dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

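/*
 * With the initcall_debug boot parameter set, the two helpers above bracket
 * every callback with messages along these lines (illustrative output only;
 * the device names and timings are made up):
 *
 *      calling  1-1:1.0+ @ 1234, parent: 1-1
 *      call 1-1:1.0+ returned 0 after 1953 usecs
 */
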
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

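/*
 * Example: for a driver declaring its callbacks as, say,
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *      };
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume; the macro also routes
 * the hibernation events to the same pair.  The foo_* names are hypothetical.
 */
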
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The "noirq" callbacks returned here run while device interrupts are
 * disabled, so the driver of the device being handled will not receive
 * interrupts while the callback is executing.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops,
                                 pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                       int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
               dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->pm_domain) {
                info = "EARLY power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "EARLY type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "EARLY class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "EARLY ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

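/*
 * A rough sketch of where this pairs up in the platform suspend core (see
 * kernel/power/suspend.c for the authoritative sequence):
 *
 *      dpm_suspend_noirq(PMSG_SUSPEND);
 *      ...the platform enters the sleep state and wakes back up...
 *      dpm_resume_noirq(PMSG_RESUME);
 */
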
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        bool put = false;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        pm_runtime_enable(dev);
        put = true;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto End;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto End;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy ";
                        callback = dev->bus->resume;
                }
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        if (put)
                pm_runtime_put_sync(dev);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

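/*
 * Drivers opt a device into the async path tested above with
 * device_enable_async_suspend(), typically at probe time; a minimal sketch
 * (pdev being whatever device is probed):
 *
 *      device_enable_async_suspend(&pdev->dev);
 */
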
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        device_lock(dev);

        if (dev->pm_domain) {
                pm_dev_dbg(dev, state, "completing power domain ");
                if (dev->pm_domain->ops.complete)
                        dev->pm_domain->ops.complete(dev);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "completing type ");
                if (dev->type->pm->complete)
                        dev->type->pm->complete(dev);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "completing class ");
                if (dev->class->pm->complete)
                        dev->class->pm->complete(dev);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "completing ");
                if (dev->bus->pm->complete)
                        dev->bus->pm->complete(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices that have gone through
 * the preparation phase of a PM transition (this allows new devices to be
 * registered again).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

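/*
 * Together with dpm_suspend_start() below, this gives the outer bracket of
 * a system sleep transition; a minimal sketch of a caller's view:
 *
 *      error = dpm_suspend_start(PMSG_SUSPEND);
 *      if (!error) {
 *              ...dpm_suspend_noirq(), sleep, dpm_resume_noirq()...
 *      }
 *      dpm_resume_end(PMSG_RESUME);
 */
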
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

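/*
 * For example, resume_event(PMSG_SUSPEND) is PMSG_RESUME and
 * resume_event(PMSG_HIBERNATE) is PMSG_RESTORE; dpm_suspend_noirq() below
 * uses this to pick the right message when it has to unwind a failed
 * transition.
 */
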
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->pm_domain) {
                info = "LATE power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "LATE type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "LATE class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "LATE ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                return 0;

        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                return 0;
        }

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);
        complete_all(&dev->power.completion);

        if (error) {
                pm_runtime_put_sync(dev);
                async_error = error;
        } else if (dev->power.is_suspended) {
                __pm_runtime_disable(dev, false);
        }

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int error = 0;

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                pm_dev_dbg(dev, state, "preparing power domain ");
                if (dev->pm_domain->ops.prepare)
                        error = dev->pm_domain->ops.prepare(dev);
                suspend_report_result(dev->pm_domain->ops.prepare, error);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "preparing type ");
                if (dev->type->pm->prepare)
                        error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "preparing class ");
                if (dev->class->pm->prepare)
                        error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "preparing ");
                if (dev->bus->pm->prepare)
                        error = dev->bus->pm->prepare(dev);
                suspend_report_result(dev->bus->pm->prepare, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
                               dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

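/*
 * Note: suspend_report_result(), used throughout this file, is a macro from
 * include/linux/pm.h that supplies __func__ automatically; roughly:
 *
 *      #define suspend_report_result(fn, ret) \
 *              __suspend_report_result(__func__, fn, ret)
 */
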
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
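/*
 * A minimal sketch of how a driver with a dependency outside its branch of
 * the device tree might use this from a resume callback (bar_dev is a
 * hypothetical device that must finish resuming first, and foo_hw_init() a
 * hypothetical helper):
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              int error = device_pm_wait_for_dev(dev, bar_dev);
 *
 *              if (error)
 *                      return error;
 *              return foo_hw_init(dev);
 *      }
 */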