1 /*
2  * drivers/base/power/runtime.c - Helper functions for device runtime PM
3  *
4  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
6  *
7  * This file is released under the GPLv2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <trace/events/rpm.h>
14 #include "power.h"
15
16 typedef int (*pm_callback_t)(struct device *);
17
18 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
19 {
20         pm_callback_t cb;
21         const struct dev_pm_ops *ops;
22
23         if (dev->pm_domain)
24                 ops = &dev->pm_domain->ops;
25         else if (dev->type && dev->type->pm)
26                 ops = dev->type->pm;
27         else if (dev->class && dev->class->pm)
28                 ops = dev->class->pm;
29         else if (dev->bus && dev->bus->pm)
30                 ops = dev->bus->pm;
31         else
32                 ops = NULL;
33
34         if (ops)
35                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
36         else
37                 cb = NULL;
38
39         if (!cb && dev->driver && dev->driver->pm)
40                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
41
42         return cb;
43 }
44
45 #define RPM_GET_CALLBACK(dev, callback) \
46                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
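
/*
 * __rpm_get_callback() picks the callback at byte offset @cb_offset inside
 * struct dev_pm_ops, honouring the precedence PM domain -> device type ->
 * class -> bus, and falling back to the driver's own dev_pm_ops if none of
 * those provides one.  RPM_GET_CALLBACK() hides the offsetof() arithmetic,
 * so callers simply name the member they want, for example:
 *
 *	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 */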
47
48 #ifdef CONFIG_PM_RUNTIME
49
50 static int rpm_resume(struct device *dev, int rpmflags);
51 static int rpm_suspend(struct device *dev, int rpmflags);
52
53 /**
54  * update_pm_runtime_accounting - Update the time accounting of power states
55  * @dev: Device to update the accounting for
56  *
57  * In order to be able to have time accounting of the various power states
58  * (as used by programs such as PowerTOP to show the effectiveness of runtime
59  * PM), we need to track the time spent in each state.
60  * update_pm_runtime_accounting() must be called each time before the
61  * runtime_status field is updated, so that the time spent in the old
62  * state is accounted for correctly.
63  */
64 void update_pm_runtime_accounting(struct device *dev)
65 {
66         unsigned long now = jiffies;
67         unsigned long delta;
68
69         delta = now - dev->power.accounting_timestamp;
70
71         dev->power.accounting_timestamp = now;
72
73         if (dev->power.disable_depth > 0)
74                 return;
75
76         if (dev->power.runtime_status == RPM_SUSPENDED)
77                 dev->power.suspended_jiffies += delta;
78         else
79                 dev->power.active_jiffies += delta;
80 }
81
82 static void __update_runtime_status(struct device *dev, enum rpm_status status)
83 {
84         update_pm_runtime_accounting(dev);
85         dev->power.runtime_status = status;
86 }
87
88 /**
89  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
90  * @dev: Device to handle.
91  */
92 static void pm_runtime_deactivate_timer(struct device *dev)
93 {
94         if (dev->power.timer_expires > 0) {
95                 del_timer(&dev->power.suspend_timer);
96                 dev->power.timer_expires = 0;
97         }
98 }
99
100 /**
101  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
102  * @dev: Device to handle.
103  */
104 static void pm_runtime_cancel_pending(struct device *dev)
105 {
106         pm_runtime_deactivate_timer(dev);
107         /*
108          * In case there's a request pending, make sure its work function will
109          * return without doing anything.
110          */
111         dev->power.request = RPM_REQ_NONE;
112 }
113
114 /**
115  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
116  * @dev: Device to handle.
117  *
118  * Compute the autosuspend-delay expiration time based on the device's
119  * power.last_busy time.  If the delay has already expired or is disabled
120  * (negative) or the power.use_autosuspend flag isn't set, return 0.
121  * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
122  *
123  * This function may be called either with or without dev->power.lock held.
124  * Either way it can be racy, since power.last_busy may be updated at any time.
125  */
126 unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
127 {
128         int autosuspend_delay;
129         long elapsed;
130         unsigned long last_busy;
131         unsigned long expires = 0;
132
133         if (!dev->power.use_autosuspend)
134                 goto out;
135
136         autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
137         if (autosuspend_delay < 0)
138                 goto out;
139
140         last_busy = ACCESS_ONCE(dev->power.last_busy);
141         elapsed = jiffies - last_busy;
142         if (elapsed < 0)
143                 goto out;       /* jiffies has wrapped around. */
144
145         /*
146          * If the autosuspend_delay is >= 1 second, align the timer by rounding
147          * up to the nearest second.
148          */
149         expires = last_busy + msecs_to_jiffies(autosuspend_delay);
150         if (autosuspend_delay >= 1000)
151                 expires = round_jiffies(expires);
152         expires += !expires;
153         if (elapsed >= expires - last_busy)
154                 expires = 0;    /* Already expired. */
155
156  out:
157         return expires;
158 }
159 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
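
/*
 * rpm_suspend() consults the value returned above whenever the RPM_AUTO flag
 * is set.  A driver using autosuspend typically refreshes power.last_busy and
 * drops its usage count when it finishes I/O, e.g. (a minimal sketch using
 * the standard helpers from pm_runtime.h):
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * which pushes the expiration time computed here into the future.
 */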
160
161 static int dev_memalloc_noio(struct device *dev, void *data)
162 {
163         return dev->power.memalloc_noio;
164 }
165
166 /**
167  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
168  * @dev: Device to handle.
169  * @enable: True for setting the flag and False for clearing the flag.
170  *
171  * Set the flag for all devices in the path from the device to the
172  * root device in the device tree if @enable is true, otherwise clear
173  * the flag for devices in the path whose siblings don't set the flag.
174  *
175  * This function should only be called by block device or network
176  * device drivers to solve the deadlock problem during runtime
177  * resume/suspend:
178  *
179  *     If a memory allocation with GFP_KERNEL is made inside the runtime
180  *     resume/suspend callback of any one of the device's ancestors (or
181  *     of the block device itself), a deadlock may be triggered inside
182  *     the memory allocation, since it might not complete until the block
183  *     device becomes active and the involved page I/O finishes. This
184  *     situation was first pointed out by Alan Stern. Network devices
185  *     are involved in iSCSI-like situations.
186  *
187  * The dev_hotplug_mutex lock is held in this function to handle the
188  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
189  * from an async probe().
190  *
191  * The function should be called between device_add() and device_del()
192  * on the affected device (block/network device).
193  */
194 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
195 {
196         static DEFINE_MUTEX(dev_hotplug_mutex);
197
198         mutex_lock(&dev_hotplug_mutex);
199         for (;;) {
200                 bool enabled;
201
202                 /* hold power lock since bitfield is not SMP-safe. */
203                 spin_lock_irq(&dev->power.lock);
204                 enabled = dev->power.memalloc_noio;
205                 dev->power.memalloc_noio = enable;
206                 spin_unlock_irq(&dev->power.lock);
207
208                 /*
209                  * No need to enable the ancestors any more if the device
210                  * has already been enabled.
211                  */
212                 if (enabled && enable)
213                         break;
214
215                 dev = dev->parent;
216
217                 /*
218                  * Clear the flag of the parent device only if none of its
219                  * children have the flag set, because an ancestor's flag
220                  * may have been set by any one of its descendants.
221                  */
222                 if (!dev || (!enable &&
223                              device_for_each_child(dev, NULL,
224                                                    dev_memalloc_noio)))
225                         break;
226         }
227         mutex_unlock(&dev_hotplug_mutex);
228 }
229 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
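
/*
 * A minimal usage sketch for a block or network device driver, following the
 * placement described in the comment above:
 *
 *	device_add(dev);
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */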
230
231 /**
232  * rpm_check_suspend_allowed - Test whether a device may be suspended.
233  * @dev: Device to test.
234  */
235 static int rpm_check_suspend_allowed(struct device *dev)
236 {
237         int retval = 0;
238
239         if (dev->power.runtime_error)
240                 retval = -EINVAL;
241         else if (dev->power.disable_depth > 0)
242                 retval = -EACCES;
243         else if (atomic_read(&dev->power.usage_count) > 0)
244                 retval = -EAGAIN;
245         else if (!pm_children_suspended(dev))
246                 retval = -EBUSY;
247
248         /* Pending resume requests take precedence over suspends. */
249         else if ((dev->power.deferred_resume
250                         && dev->power.runtime_status == RPM_SUSPENDING)
251             || (dev->power.request_pending
252                         && dev->power.request == RPM_REQ_RESUME))
253                 retval = -EAGAIN;
254         else if (__dev_pm_qos_read_value(dev) < 0)
255                 retval = -EPERM;
256         else if (dev->power.runtime_status == RPM_SUSPENDED)
257                 retval = 1;
258
259         return retval;
260 }
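
/*
 * Return convention of rpm_check_suspend_allowed(): a negative errno means
 * the device must not be suspended right now, 0 means a suspend or idle
 * notification may proceed, and 1 means the device is already in the
 * RPM_SUSPENDED state.
 */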
261
262 /**
263  * __rpm_callback - Run a given runtime PM callback for a given device.
264  * @cb: Runtime PM callback to run.
265  * @dev: Device to run the callback for.
266  */
267 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
268         __releases(&dev->power.lock) __acquires(&dev->power.lock)
269 {
270         int retval;
271
272         if (dev->power.irq_safe)
273                 spin_unlock(&dev->power.lock);
274         else
275                 spin_unlock_irq(&dev->power.lock);
276
277         retval = cb(dev);
278
279         if (dev->power.irq_safe)
280                 spin_lock(&dev->power.lock);
281         else
282                 spin_lock_irq(&dev->power.lock);
283
284         return retval;
285 }
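
/*
 * Note that the callback above runs with dev->power.lock released.  For
 * irq_safe devices only the spinlock is dropped, so interrupts stay disabled
 * across the call and the callback must not sleep; otherwise interrupts are
 * re-enabled for the duration of the callback.
 */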
286
287 /**
288  * rpm_idle - Notify device bus type if the device can be suspended.
289  * @dev: Device to notify the bus type about.
290  * @rpmflags: Flag bits.
291  *
292  * Check if the device's runtime PM status allows it to be suspended.  If
293  * another idle notification has been started earlier, return immediately.  If
294  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
295  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
296  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
297  *
298  * This function must be called under dev->power.lock with interrupts disabled.
299  */
300 static int rpm_idle(struct device *dev, int rpmflags)
301 {
302         int (*callback)(struct device *);
303         int retval;
304
305         trace_rpm_idle(dev, rpmflags);
306         retval = rpm_check_suspend_allowed(dev);
307         if (retval < 0)
308                 ;       /* Conditions are wrong. */
309
310         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
311         else if (dev->power.runtime_status != RPM_ACTIVE)
312                 retval = -EAGAIN;
313
314         /*
315          * Any pending request other than an idle notification takes
316          * precedence over us, except that the timer may be running.
317          */
318         else if (dev->power.request_pending &&
319             dev->power.request > RPM_REQ_IDLE)
320                 retval = -EAGAIN;
321
322         /* Act as though RPM_NOWAIT is always set. */
323         else if (dev->power.idle_notification)
324                 retval = -EINPROGRESS;
325         if (retval)
326                 goto out;
327
328         /* Pending requests need to be canceled. */
329         dev->power.request = RPM_REQ_NONE;
330
331         if (dev->power.no_callbacks)
332                 goto out;
333
334         /* Carry out an asynchronous or a synchronous idle notification. */
335         if (rpmflags & RPM_ASYNC) {
336                 dev->power.request = RPM_REQ_IDLE;
337                 if (!dev->power.request_pending) {
338                         dev->power.request_pending = true;
339                         queue_work(pm_wq, &dev->power.work);
340                 }
341                 trace_rpm_return_int(dev, _THIS_IP_, 0);
342                 return 0;
343         }
344
345         dev->power.idle_notification = true;
346
347         callback = RPM_GET_CALLBACK(dev, runtime_idle);
348
349         if (callback)
350                 retval = __rpm_callback(callback, dev);
351
352         dev->power.idle_notification = false;
353         wake_up_all(&dev->power.wait_queue);
354
355  out:
356         trace_rpm_return_int(dev, _THIS_IP_, retval);
357         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
358 }
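
/*
 * A ->runtime_idle() callback can veto the automatic suspend that rpm_idle()
 * falls through to: returning a nonzero value (typically -EBUSY) keeps the
 * device active, while returning 0 lets the core go on to
 * rpm_suspend(dev, RPM_AUTO).  A hypothetical driver callback might look
 * like this (foo_device_busy() is an illustrative helper, not a real
 * function):
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		return foo_device_busy(dev) ? -EBUSY : 0;
 *	}
 */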
359
360 /**
361  * rpm_callback - Run a given runtime PM callback for a given device.
362  * @cb: Runtime PM callback to run.
363  * @dev: Device to run the callback for.
364  */
365 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
366 {
367         int retval;
368
369         if (!cb)
370                 return -ENOSYS;
371
372         if (dev->power.memalloc_noio) {
373                 unsigned int noio_flag;
374
375                 /*
376                  * A deadlock might occur if a memory allocation with
377                  * GFP_KERNEL happens inside the runtime_suspend or
378                  * runtime_resume callback of a block device's
379                  * ancestor or of the block device itself. A network
380                  * device might be regarded as part of an iSCSI block
381                  * device, so the network device and its ancestors
382                  * should be marked as memalloc_noio too.
383                  */
384                 noio_flag = memalloc_noio_save();
385                 retval = __rpm_callback(cb, dev);
386                 memalloc_noio_restore(noio_flag);
387         } else {
388                 retval = __rpm_callback(cb, dev);
389         }
390
391         dev->power.runtime_error = retval;
392         return retval != -EACCES ? retval : -EIO;
393 }
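
/*
 * rpm_callback() stores the callback's return value in power.runtime_error
 * and translates -EACCES into -EIO: within the runtime PM core -EACCES means
 * "runtime PM is disabled for this device" (see rpm_check_suspend_allowed()
 * above), so a callback's own -EACCES must not be propagated unchanged.
 */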
394
395 /**
396  * rpm_suspend - Carry out runtime suspend of given device.
397  * @dev: Device to suspend.
398  * @rpmflags: Flag bits.
399  *
400  * Check if the device's runtime PM status allows it to be suspended.
401  * Cancel a pending idle notification, autosuspend or suspend. If
402  * another suspend has been started earlier, either return immediately
403  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
404  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
405  * otherwise run the ->runtime_suspend() callback directly. If
406  * ->runtime_suspend() succeeds and a deferred resume was requested while
407  * the callback was running, carry it out; otherwise send an idle
408  * notification for the device's parent (provided that neither
409  * ignore_children of parent->power nor irq_safe of dev->power is set).
410  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
411  * flag is set and the next autosuspend-delay expiration time is in the
412  * future, schedule another autosuspend attempt.
413  *
414  * This function must be called under dev->power.lock with interrupts disabled.
415  */
416 static int rpm_suspend(struct device *dev, int rpmflags)
417         __releases(&dev->power.lock) __acquires(&dev->power.lock)
418 {
419         int (*callback)(struct device *);
420         struct device *parent = NULL;
421         int retval;
422
423         trace_rpm_suspend(dev, rpmflags);
424
425  repeat:
426         retval = rpm_check_suspend_allowed(dev);
427
428         if (retval < 0)
429                 ;       /* Conditions are wrong. */
430
431         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
432         else if (dev->power.runtime_status == RPM_RESUMING &&
433             !(rpmflags & RPM_ASYNC))
434                 retval = -EAGAIN;
435         if (retval)
436                 goto out;
437
438         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
439         if ((rpmflags & RPM_AUTO)
440             && dev->power.runtime_status != RPM_SUSPENDING) {
441                 unsigned long expires = pm_runtime_autosuspend_expiration(dev);
442
443                 if (expires != 0) {
444                         /* Pending requests need to be canceled. */
445                         dev->power.request = RPM_REQ_NONE;
446
447                         /*
448                          * Optimization: If the timer is already running and is
449                          * set to expire at or before the autosuspend delay,
450                          * avoid the overhead of resetting it.  Just let it
451                          * expire; pm_suspend_timer_fn() will take care of the
452                          * rest.
453                          */
454                         if (!(dev->power.timer_expires && time_before_eq(
455                             dev->power.timer_expires, expires))) {
456                                 dev->power.timer_expires = expires;
457                                 mod_timer(&dev->power.suspend_timer, expires);
458                         }
459                         dev->power.timer_autosuspends = 1;
460                         goto out;
461                 }
462         }
463
464         /* Other scheduled or pending requests need to be canceled. */
465         pm_runtime_cancel_pending(dev);
466
467         if (dev->power.runtime_status == RPM_SUSPENDING) {
468                 DEFINE_WAIT(wait);
469
470                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
471                         retval = -EINPROGRESS;
472                         goto out;
473                 }
474
475                 if (dev->power.irq_safe) {
476                         spin_unlock(&dev->power.lock);
477
478                         cpu_relax();
479
480                         spin_lock(&dev->power.lock);
481                         goto repeat;
482                 }
483
484                 /* Wait for the other suspend running in parallel with us. */
485                 for (;;) {
486                         prepare_to_wait(&dev->power.wait_queue, &wait,
487                                         TASK_UNINTERRUPTIBLE);
488                         if (dev->power.runtime_status != RPM_SUSPENDING)
489                                 break;
490
491                         spin_unlock_irq(&dev->power.lock);
492
493                         schedule();
494
495                         spin_lock_irq(&dev->power.lock);
496                 }
497                 finish_wait(&dev->power.wait_queue, &wait);
498                 goto repeat;
499         }
500
501         if (dev->power.no_callbacks)
502                 goto no_callback;       /* Assume success. */
503
504         /* Carry out an asynchronous or a synchronous suspend. */
505         if (rpmflags & RPM_ASYNC) {
506                 dev->power.request = (rpmflags & RPM_AUTO) ?
507                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
508                 if (!dev->power.request_pending) {
509                         dev->power.request_pending = true;
510                         queue_work(pm_wq, &dev->power.work);
511                 }
512                 goto out;
513         }
514
515         __update_runtime_status(dev, RPM_SUSPENDING);
516
517         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
518
519         retval = rpm_callback(callback, dev);
520         if (retval)
521                 goto fail;
522
523  no_callback:
524         __update_runtime_status(dev, RPM_SUSPENDED);
525         pm_runtime_deactivate_timer(dev);
526
527         if (dev->parent) {
528                 parent = dev->parent;
529                 atomic_add_unless(&parent->power.child_count, -1, 0);
530         }
531         wake_up_all(&dev->power.wait_queue);
532
533         if (dev->power.deferred_resume) {
534                 dev->power.deferred_resume = false;
535                 rpm_resume(dev, 0);
536                 retval = -EAGAIN;
537                 goto out;
538         }
539
540         /* Maybe the parent is now able to suspend. */
541         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
542                 spin_unlock(&dev->power.lock);
543
544                 spin_lock(&parent->power.lock);
545                 rpm_idle(parent, RPM_ASYNC);
546                 spin_unlock(&parent->power.lock);
547
548                 spin_lock(&dev->power.lock);
549         }
550
551  out:
552         trace_rpm_return_int(dev, _THIS_IP_, retval);
553
554         return retval;
555
556  fail:
557         __update_runtime_status(dev, RPM_ACTIVE);
558         dev->power.deferred_resume = false;
559         wake_up_all(&dev->power.wait_queue);
560
561         if (retval == -EAGAIN || retval == -EBUSY) {
562                 dev->power.runtime_error = 0;
563
564                 /*
565                  * If the callback routine failed an autosuspend, and
566                  * if the last_busy time has been updated so that there
567                  * is a new autosuspend expiration time, automatically
568                  * reschedule another autosuspend.
569                  */
570                 if ((rpmflags & RPM_AUTO) &&
571                     pm_runtime_autosuspend_expiration(dev) != 0)
572                         goto repeat;
573         } else {
574                 pm_runtime_cancel_pending(dev);
575         }
576         goto out;
577 }
578
579 /**
580  * rpm_resume - Carry out runtime resume of given device.
581  * @dev: Device to resume.
582  * @rpmflags: Flag bits.
583  *
584  * Check if the device's runtime PM status allows it to be resumed.  Cancel
585  * any scheduled or pending requests.  If another resume has been started
586  * earlier, either return immediately or wait for it to finish, depending on the
587  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
588  * parallel with this function, either tell the other process to resume after
589  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
590  * flag is set then queue a resume request; otherwise run the
591  * ->runtime_resume() callback directly.  Queue an idle notification for the
592  * device if the resume succeeded.
593  *
594  * This function must be called under dev->power.lock with interrupts disabled.
595  */
596 static int rpm_resume(struct device *dev, int rpmflags)
597         __releases(&dev->power.lock) __acquires(&dev->power.lock)
598 {
599         int (*callback)(struct device *);
600         struct device *parent = NULL;
601         int retval = 0;
602
603         trace_rpm_resume(dev, rpmflags);
604
605  repeat:
606         if (dev->power.runtime_error)
607                 retval = -EINVAL;
608         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
609             && dev->power.runtime_status == RPM_ACTIVE)
610                 retval = 1;
611         else if (dev->power.disable_depth > 0)
612                 retval = -EACCES;
613         if (retval)
614                 goto out;
615
616         /*
617          * Other scheduled or pending requests need to be canceled.  Small
618          * optimization: If an autosuspend timer is running, leave it running
619          * rather than cancelling it now only to restart it again in the near
620          * future.
621          */
622         dev->power.request = RPM_REQ_NONE;
623         if (!dev->power.timer_autosuspends)
624                 pm_runtime_deactivate_timer(dev);
625
626         if (dev->power.runtime_status == RPM_ACTIVE) {
627                 retval = 1;
628                 goto out;
629         }
630
631         if (dev->power.runtime_status == RPM_RESUMING
632             || dev->power.runtime_status == RPM_SUSPENDING) {
633                 DEFINE_WAIT(wait);
634
635                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
636                         if (dev->power.runtime_status == RPM_SUSPENDING)
637                                 dev->power.deferred_resume = true;
638                         else
639                                 retval = -EINPROGRESS;
640                         goto out;
641                 }
642
643                 if (dev->power.irq_safe) {
644                         spin_unlock(&dev->power.lock);
645
646                         cpu_relax();
647
648                         spin_lock(&dev->power.lock);
649                         goto repeat;
650                 }
651
652                 /* Wait for the operation carried out in parallel with us. */
653                 for (;;) {
654                         prepare_to_wait(&dev->power.wait_queue, &wait,
655                                         TASK_UNINTERRUPTIBLE);
656                         if (dev->power.runtime_status != RPM_RESUMING
657                             && dev->power.runtime_status != RPM_SUSPENDING)
658                                 break;
659
660                         spin_unlock_irq(&dev->power.lock);
661
662                         schedule();
663
664                         spin_lock_irq(&dev->power.lock);
665                 }
666                 finish_wait(&dev->power.wait_queue, &wait);
667                 goto repeat;
668         }
669
670         /*
671          * See if we can skip waking up the parent.  This is safe only if
672          * power.no_callbacks is set, because otherwise we don't know whether
673          * the resume will actually succeed.
674          */
675         if (dev->power.no_callbacks && !parent && dev->parent) {
676                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
677                 if (dev->parent->power.disable_depth > 0
678                     || dev->parent->power.ignore_children
679                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
680                         atomic_inc(&dev->parent->power.child_count);
681                         spin_unlock(&dev->parent->power.lock);
682                         retval = 1;
683                         goto no_callback;       /* Assume success. */
684                 }
685                 spin_unlock(&dev->parent->power.lock);
686         }
687
688         /* Carry out an asynchronous or a synchronous resume. */
689         if (rpmflags & RPM_ASYNC) {
690                 dev->power.request = RPM_REQ_RESUME;
691                 if (!dev->power.request_pending) {
692                         dev->power.request_pending = true;
693                         queue_work(pm_wq, &dev->power.work);
694                 }
695                 retval = 0;
696                 goto out;
697         }
698
699         if (!parent && dev->parent) {
700                 /*
701                  * Increment the parent's usage counter and resume it if
702                  * necessary.  Not needed if dev is irq-safe; then the
703                  * parent is permanently resumed.
704                  */
705                 parent = dev->parent;
706                 if (dev->power.irq_safe)
707                         goto skip_parent;
708                 spin_unlock(&dev->power.lock);
709
710                 pm_runtime_get_noresume(parent);
711
712                 spin_lock(&parent->power.lock);
713                 /*
714                  * We can resume if the parent's runtime PM is disabled or it
715                  * is set to ignore children.
716                  */
717                 if (!parent->power.disable_depth
718                     && !parent->power.ignore_children) {
719                         rpm_resume(parent, 0);
720                         if (parent->power.runtime_status != RPM_ACTIVE)
721                                 retval = -EBUSY;
722                 }
723                 spin_unlock(&parent->power.lock);
724
725                 spin_lock(&dev->power.lock);
726                 if (retval)
727                         goto out;
728                 goto repeat;
729         }
730  skip_parent:
731
732         if (dev->power.no_callbacks)
733                 goto no_callback;       /* Assume success. */
734
735         __update_runtime_status(dev, RPM_RESUMING);
736
737         callback = RPM_GET_CALLBACK(dev, runtime_resume);
738
739         retval = rpm_callback(callback, dev);
740         if (retval) {
741                 __update_runtime_status(dev, RPM_SUSPENDED);
742                 pm_runtime_cancel_pending(dev);
743         } else {
744  no_callback:
745                 __update_runtime_status(dev, RPM_ACTIVE);
746                 if (parent)
747                         atomic_inc(&parent->power.child_count);
748         }
749         wake_up_all(&dev->power.wait_queue);
750
751         if (retval >= 0)
752                 rpm_idle(dev, RPM_ASYNC);
753
754  out:
755         if (parent && !dev->power.irq_safe) {
756                 spin_unlock_irq(&dev->power.lock);
757
758                 pm_runtime_put(parent);
759
760                 spin_lock_irq(&dev->power.lock);
761         }
762
763         trace_rpm_return_int(dev, _THIS_IP_, retval);
764
765         return retval;
766 }
767
768 /**
769  * pm_runtime_work - Universal runtime PM work function.
770  * @work: Work structure used for scheduling the execution of this function.
771  *
772  * Use @work to get the device object the work is to be done for, determine what
773  * is to be done and execute the appropriate runtime PM function.
774  */
775 static void pm_runtime_work(struct work_struct *work)
776 {
777         struct device *dev = container_of(work, struct device, power.work);
778         enum rpm_request req;
779
780         spin_lock_irq(&dev->power.lock);
781
782         if (!dev->power.request_pending)
783                 goto out;
784
785         req = dev->power.request;
786         dev->power.request = RPM_REQ_NONE;
787         dev->power.request_pending = false;
788
789         switch (req) {
790         case RPM_REQ_NONE:
791                 break;
792         case RPM_REQ_IDLE:
793                 rpm_idle(dev, RPM_NOWAIT);
794                 break;
795         case RPM_REQ_SUSPEND:
796                 rpm_suspend(dev, RPM_NOWAIT);
797                 break;
798         case RPM_REQ_AUTOSUSPEND:
799                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
800                 break;
801         case RPM_REQ_RESUME:
802                 rpm_resume(dev, RPM_NOWAIT);
803                 break;
804         }
805
806  out:
807         spin_unlock_irq(&dev->power.lock);
808 }
809
810 /**
811  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
812  * @data: Device pointer passed by pm_schedule_suspend().
813  *
814  * Check if the time is right and queue a suspend request.
815  */
816 static void pm_suspend_timer_fn(unsigned long data)
817 {
818         struct device *dev = (struct device *)data;
819         unsigned long flags;
820         unsigned long expires;
821
822         spin_lock_irqsave(&dev->power.lock, flags);
823
824         expires = dev->power.timer_expires;
825         /* If 'expires' is after 'jiffies' we've been called too early. */
826         if (expires > 0 && !time_after(expires, jiffies)) {
827                 dev->power.timer_expires = 0;
828                 rpm_suspend(dev, dev->power.timer_autosuspends ?
829                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
830         }
831
832         spin_unlock_irqrestore(&dev->power.lock, flags);
833 }
834
835 /**
836  * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
837  * @dev: Device to suspend.
838  * @delay: Time to wait before submitting a suspend request, in milliseconds.
839  */
840 int pm_schedule_suspend(struct device *dev, unsigned int delay)
841 {
842         unsigned long flags;
843         int retval;
844
845         spin_lock_irqsave(&dev->power.lock, flags);
846
847         if (!delay) {
848                 retval = rpm_suspend(dev, RPM_ASYNC);
849                 goto out;
850         }
851
852         retval = rpm_check_suspend_allowed(dev);
853         if (retval)
854                 goto out;
855
856         /* Other scheduled or pending requests need to be canceled. */
857         pm_runtime_cancel_pending(dev);
858
859         dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
860         dev->power.timer_expires += !dev->power.timer_expires;
861         dev->power.timer_autosuspends = 0;
862         mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
863
864  out:
865         spin_unlock_irqrestore(&dev->power.lock, flags);
866
867         return retval;
868 }
869 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
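
/*
 * Example: a driver that wants a one-shot delayed suspend instead of the
 * autosuspend machinery can simply do (sketch):
 *
 *	pm_schedule_suspend(dev, 250);
 *
 * which schedules a suspend attempt roughly 250 ms from now, while a @delay
 * of zero queues the suspend request immediately, as the code above shows.
 */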
870
871 /**
872  * __pm_runtime_idle - Entry point for runtime idle operations.
873  * @dev: Device to send idle notification for.
874  * @rpmflags: Flag bits.
875  *
876  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
877  * return immediately if it is larger than zero.  Then carry out an idle
878  * notification, either synchronous or asynchronous.
879  *
880  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
881  * or if pm_runtime_irq_safe() has been called.
882  */
883 int __pm_runtime_idle(struct device *dev, int rpmflags)
884 {
885         unsigned long flags;
886         int retval;
887
888         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
889
890         if (rpmflags & RPM_GET_PUT) {
891                 if (!atomic_dec_and_test(&dev->power.usage_count))
892                         return 0;
893         }
894
895         spin_lock_irqsave(&dev->power.lock, flags);
896         retval = rpm_idle(dev, rpmflags);
897         spin_unlock_irqrestore(&dev->power.lock, flags);
898
899         return retval;
900 }
901 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
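
/*
 * The static inline helpers in include/linux/pm_runtime.h funnel into this
 * entry point with different flag combinations: pm_runtime_idle() passes no
 * flags, pm_request_idle() passes RPM_ASYNC, and pm_runtime_put() passes
 * RPM_GET_PUT | RPM_ASYNC so that the usage count is dropped first.
 */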
902
903 /**
904  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
905  * @dev: Device to suspend.
906  * @rpmflags: Flag bits.
907  *
908  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
909  * return immediately if it is larger than zero.  Then carry out a suspend,
910  * either synchronous or asynchronous.
911  *
912  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
913  * or if pm_runtime_irq_safe() has been called.
914  */
915 int __pm_runtime_suspend(struct device *dev, int rpmflags)
916 {
917         unsigned long flags;
918         int retval;
919
920         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
921
922         if (rpmflags & RPM_GET_PUT) {
923                 if (!atomic_dec_and_test(&dev->power.usage_count))
924                         return 0;
925         }
926
927         spin_lock_irqsave(&dev->power.lock, flags);
928         retval = rpm_suspend(dev, rpmflags);
929         spin_unlock_irqrestore(&dev->power.lock, flags);
930
931         return retval;
932 }
933 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
934
935 /**
936  * __pm_runtime_resume - Entry point for runtime resume operations.
937  * @dev: Device to resume.
938  * @rpmflags: Flag bits.
939  *
940  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
941  * carry out a resume, either synchronous or asynchronous.
942  *
943  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
944  * or if pm_runtime_irq_safe() has been called.
945  */
946 int __pm_runtime_resume(struct device *dev, int rpmflags)
947 {
948         unsigned long flags;
949         int retval;
950
951         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
952
953         if (rpmflags & RPM_GET_PUT)
954                 atomic_inc(&dev->power.usage_count);
955
956         spin_lock_irqsave(&dev->power.lock, flags);
957         retval = rpm_resume(dev, rpmflags);
958         spin_unlock_irqrestore(&dev->power.lock, flags);
959
960         return retval;
961 }
962 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
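
/*
 * Together, the three entry points above implement the usual driver pattern
 * (a minimal sketch using the pm_runtime.h wrappers):
 *
 *	pm_runtime_get_sync(dev);
 *	... perform I/O ...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * pm_runtime_get_sync() maps to __pm_runtime_resume(dev, RPM_GET_PUT), and
 * pm_runtime_put_autosuspend() maps to
 * __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO).
 */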
963
964 /**
965  * __pm_runtime_set_status - Set runtime PM status of a device.
966  * @dev: Device to handle.
967  * @status: New runtime PM status of the device.
968  *
969  * If runtime PM of the device is disabled or its power.runtime_error field is
970  * different from zero, the status may be changed either to RPM_ACTIVE, or to
971  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
972  * However, if the device has a parent and the parent is not active, and the
973  * parent's power.ignore_children flag is unset, the device's status cannot be
974  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
975  *
976  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
977  * and the device parent's counter of unsuspended children is modified to
978  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
979  * notification request for the parent is submitted.
980  */
981 int __pm_runtime_set_status(struct device *dev, unsigned int status)
982 {
983         struct device *parent = dev->parent;
984         unsigned long flags;
985         bool notify_parent = false;
986         int error = 0;
987
988         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
989                 return -EINVAL;
990
991         spin_lock_irqsave(&dev->power.lock, flags);
992
993         if (!dev->power.runtime_error && !dev->power.disable_depth) {
994                 error = -EAGAIN;
995                 goto out;
996         }
997
998         if (dev->power.runtime_status == status)
999                 goto out_set;
1000
1001         if (status == RPM_SUSPENDED) {
1002                 /* It always is possible to set the status to 'suspended'. */
1003                 if (parent) {
1004                         atomic_add_unless(&parent->power.child_count, -1, 0);
1005                         notify_parent = !parent->power.ignore_children;
1006                 }
1007                 goto out_set;
1008         }
1009
1010         if (parent) {
1011                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1012
1013                 /*
1014                  * It is invalid to put an active child under a parent that is
1015                  * not active, has runtime PM enabled and the
1016                  * 'power.ignore_children' flag unset.
1017                  */
1018                 if (!parent->power.disable_depth
1019                     && !parent->power.ignore_children
1020                     && parent->power.runtime_status != RPM_ACTIVE)
1021                         error = -EBUSY;
1022                 else if (dev->power.runtime_status == RPM_SUSPENDED)
1023                         atomic_inc(&parent->power.child_count);
1024
1025                 spin_unlock(&parent->power.lock);
1026
1027                 if (error)
1028                         goto out;
1029         }
1030
1031  out_set:
1032         __update_runtime_status(dev, status);
1033         dev->power.runtime_error = 0;
1034  out:
1035         spin_unlock_irqrestore(&dev->power.lock, flags);
1036
1037         if (notify_parent)
1038                 pm_request_idle(parent);
1039
1040         return error;
1041 }
1042 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
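
/*
 * Drivers normally reach this function through the pm_runtime_set_active()
 * and pm_runtime_set_suspended() wrappers from pm_runtime.h.  A probe routine
 * that finds the device already powered up would typically report that before
 * enabling runtime PM (sketch):
 *
 *	pm_runtime_set_active(dev);
 */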
1043
1044 /**
1045  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1046  * @dev: Device to handle.
1047  *
1048  * Flush all pending requests for the device from pm_wq and wait for all
1049  * runtime PM operations involving the device in progress to complete.
1050  *
1051  * Should be called under dev->power.lock with interrupts disabled.
1052  */
1053 static void __pm_runtime_barrier(struct device *dev)
1054 {
1055         pm_runtime_deactivate_timer(dev);
1056
1057         if (dev->power.request_pending) {
1058                 dev->power.request = RPM_REQ_NONE;
1059                 spin_unlock_irq(&dev->power.lock);
1060
1061                 cancel_work_sync(&dev->power.work);
1062
1063                 spin_lock_irq(&dev->power.lock);
1064                 dev->power.request_pending = false;
1065         }
1066
1067         if (dev->power.runtime_status == RPM_SUSPENDING
1068             || dev->power.runtime_status == RPM_RESUMING
1069             || dev->power.idle_notification) {
1070                 DEFINE_WAIT(wait);
1071
1072                 /* Suspend, wake-up or idle notification in progress. */
1073                 for (;;) {
1074                         prepare_to_wait(&dev->power.wait_queue, &wait,
1075                                         TASK_UNINTERRUPTIBLE);
1076                         if (dev->power.runtime_status != RPM_SUSPENDING
1077                             && dev->power.runtime_status != RPM_RESUMING
1078                             && !dev->power.idle_notification)
1079                                 break;
1080                         spin_unlock_irq(&dev->power.lock);
1081
1082                         schedule();
1083
1084                         spin_lock_irq(&dev->power.lock);
1085                 }
1086                 finish_wait(&dev->power.wait_queue, &wait);
1087         }
1088 }
1089
1090 /**
1091  * pm_runtime_barrier - Flush pending requests and wait for completions.
1092  * @dev: Device to handle.
1093  *
1094  * Prevent the device from being suspended by incrementing its usage counter
1095  * and, if there's a pending resume request for the device, wake the device up.
1096  * Next, make sure that all pending requests for the device have been flushed
1097  * from pm_wq and wait for all runtime PM operations involving the device in
1098  * progress to complete.
1099  *
1100  * Return value:
1101  * 1, if there was a resume request pending and the device had to be woken up,
1102  * 0, otherwise
1103  */
1104 int pm_runtime_barrier(struct device *dev)
1105 {
1106         int retval = 0;
1107
1108         pm_runtime_get_noresume(dev);
1109         spin_lock_irq(&dev->power.lock);
1110
1111         if (dev->power.request_pending
1112             && dev->power.request == RPM_REQ_RESUME) {
1113                 rpm_resume(dev, 0);
1114                 retval = 1;
1115         }
1116
1117         __pm_runtime_barrier(dev);
1118
1119         spin_unlock_irq(&dev->power.lock);
1120         pm_runtime_put_noidle(dev);
1121
1122         return retval;
1123 }
1124 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1125
1126 /**
1127  * __pm_runtime_disable - Disable runtime PM of a device.
1128  * @dev: Device to handle.
1129  * @check_resume: If set, check if there's a resume request for the device.
1130  *
1131  * Increment power.disable_depth for the device and if it was zero previously,
1132  * cancel all pending runtime PM requests for the device and wait for all
1133  * operations in progress to complete.  The device can be either active or
1134  * suspended after its runtime PM has been disabled.
1135  *
1136  * If @check_resume is set and there's a resume request pending when
1137  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1138  * function will wake up the device before disabling its runtime PM.
1139  */
1140 void __pm_runtime_disable(struct device *dev, bool check_resume)
1141 {
1142         spin_lock_irq(&dev->power.lock);
1143
1144         if (dev->power.disable_depth > 0) {
1145                 dev->power.disable_depth++;
1146                 goto out;
1147         }
1148
1149         /*
1150          * Wake up the device if there's a resume request pending, because that
1151          * means there probably is some I/O to process and disabling runtime PM
1152          * shouldn't prevent the device from processing the I/O.
1153          */
1154         if (check_resume && dev->power.request_pending
1155             && dev->power.request == RPM_REQ_RESUME) {
1156                 /*
1157                  * Prevent suspends and idle notifications from being carried
1158                  * out after we have woken up the device.
1159                  */
1160                 pm_runtime_get_noresume(dev);
1161
1162                 rpm_resume(dev, 0);
1163
1164                 pm_runtime_put_noidle(dev);
1165         }
1166
1167         if (!dev->power.disable_depth++)
1168                 __pm_runtime_barrier(dev);
1169
1170  out:
1171         spin_unlock_irq(&dev->power.lock);
1172 }
1173 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1174
1175 /**
1176  * pm_runtime_enable - Enable runtime PM of a device.
1177  * @dev: Device to handle.
1178  */
1179 void pm_runtime_enable(struct device *dev)
1180 {
1181         unsigned long flags;
1182
1183         spin_lock_irqsave(&dev->power.lock, flags);
1184
1185         if (dev->power.disable_depth > 0)
1186                 dev->power.disable_depth--;
1187         else
1188                 dev_warn(dev, "Unbalanced %s!\n", __func__);
1189
1190         spin_unlock_irqrestore(&dev->power.lock, flags);
1191 }
1192 EXPORT_SYMBOL_GPL(pm_runtime_enable);
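
/*
 * pm_runtime_enable() only decrements power.disable_depth, so each call must
 * balance an earlier pm_runtime_disable() (or the initial disable_depth of 1
 * set by pm_runtime_init()).  A common pattern, assuming the device is
 * powered up in probe (sketch):
 *
 *	probe:   pm_runtime_set_active(dev); pm_runtime_enable(dev);
 *	remove:  pm_runtime_disable(dev);
 */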
1193
1194 /**
1195  * pm_runtime_forbid - Block runtime PM of a device.
1196  * @dev: Device to handle.
1197  *
1198  * Increase the device's usage count and clear its power.runtime_auto flag,
1199  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1200  * for it.
1201  */
1202 void pm_runtime_forbid(struct device *dev)
1203 {
1204         spin_lock_irq(&dev->power.lock);
1205         if (!dev->power.runtime_auto)
1206                 goto out;
1207
1208         dev->power.runtime_auto = false;
1209         atomic_inc(&dev->power.usage_count);
1210         rpm_resume(dev, 0);
1211
1212  out:
1213         spin_unlock_irq(&dev->power.lock);
1214 }
1215 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1216
1217 /**
1218  * pm_runtime_allow - Unblock runtime PM of a device.
1219  * @dev: Device to handle.
1220  *
1221  * Decrease the device's usage count and set its power.runtime_auto flag.
1222  */
1223 void pm_runtime_allow(struct device *dev)
1224 {
1225         spin_lock_irq(&dev->power.lock);
1226         if (dev->power.runtime_auto)
1227                 goto out;
1228
1229         dev->power.runtime_auto = true;
1230         if (atomic_dec_and_test(&dev->power.usage_count))
1231                 rpm_idle(dev, RPM_AUTO);
1232
1233  out:
1234         spin_unlock_irq(&dev->power.lock);
1235 }
1236 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1237
1238 /**
1239  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1240  * @dev: Device to handle.
1241  *
1242  * Set the power.no_callbacks flag, which tells the PM core that this
1243  * device is power-managed through its parent and has no runtime PM
1244  * callbacks of its own.  The runtime sysfs attributes will be removed.
1245  */
1246 void pm_runtime_no_callbacks(struct device *dev)
1247 {
1248         spin_lock_irq(&dev->power.lock);
1249         dev->power.no_callbacks = 1;
1250         spin_unlock_irq(&dev->power.lock);
1251         if (device_is_registered(dev))
1252                 rpm_sysfs_remove(dev);
1253 }
1254 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1255
1256 /**
1257  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1258  * @dev: Device to handle
1259  *
1260  * Set the power.irq_safe flag, which tells the PM core that the
1261  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1262  * always be invoked with the spinlock held and interrupts disabled.  It also
1263  * causes the parent's usage counter to be permanently incremented, preventing
1264  * the parent from runtime suspending -- otherwise an irq-safe child might have
1265  * to wait for a non-irq-safe parent.
1266  */
1267 void pm_runtime_irq_safe(struct device *dev)
1268 {
1269         if (dev->parent)
1270                 pm_runtime_get_sync(dev->parent);
1271         spin_lock_irq(&dev->power.lock);
1272         dev->power.irq_safe = 1;
1273         spin_unlock_irq(&dev->power.lock);
1274 }
1275 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1276
1277 /**
1278  * update_autosuspend - Handle a change to a device's autosuspend settings.
1279  * @dev: Device to handle.
1280  * @old_delay: The former autosuspend_delay value.
1281  * @old_use: The former use_autosuspend value.
1282  *
1283  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1284  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1285  *
1286  * This function must be called under dev->power.lock with interrupts disabled.
1287  */
1288 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1289 {
1290         int delay = dev->power.autosuspend_delay;
1291
1292         /* Should runtime suspend be prevented now? */
1293         if (dev->power.use_autosuspend && delay < 0) {
1294
1295                 /* If it used to be allowed then prevent it. */
1296                 if (!old_use || old_delay >= 0) {
1297                         atomic_inc(&dev->power.usage_count);
1298                         rpm_resume(dev, 0);
1299                 }
1300         }
1301
1302         /* Runtime suspend should be allowed now. */
1303         else {
1304
1305                 /* If it used to be prevented then allow it. */
1306                 if (old_use && old_delay < 0)
1307                         atomic_dec(&dev->power.usage_count);
1308
1309                 /* Maybe we can autosuspend now. */
1310                 rpm_idle(dev, RPM_AUTO);
1311         }
1312 }
1313
1314 /**
1315  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1316  * @dev: Device to handle.
1317  * @delay: Value of the new delay in milliseconds.
1318  *
1319  * Set the device's power.autosuspend_delay value.  If it changes to negative
1320  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1321  * changes the other way, allow runtime suspends.
1322  */
1323 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1324 {
1325         int old_delay, old_use;
1326
1327         spin_lock_irq(&dev->power.lock);
1328         old_delay = dev->power.autosuspend_delay;
1329         old_use = dev->power.use_autosuspend;
1330         dev->power.autosuspend_delay = delay;
1331         update_autosuspend(dev, old_delay, old_use);
1332         spin_unlock_irq(&dev->power.lock);
1333 }
1334 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1335
1336 /**
1337  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1338  * @dev: Device to handle.
1339  * @use: New value for use_autosuspend.
1340  *
1341  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1342  * suspends as needed.
1343  */
1344 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1345 {
1346         int old_delay, old_use;
1347
1348         spin_lock_irq(&dev->power.lock);
1349         old_delay = dev->power.autosuspend_delay;
1350         old_use = dev->power.use_autosuspend;
1351         dev->power.use_autosuspend = use;
1352         update_autosuspend(dev, old_delay, old_use);
1353         spin_unlock_irq(&dev->power.lock);
1354 }
1355 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
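
/*
 * A driver opting into autosuspend usually combines the two setters above
 * with enabling runtime PM, e.g. (a minimal sketch using the pm_runtime.h
 * wrappers):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * After that, pm_runtime_put_autosuspend() calls let the device sit idle for
 * about two seconds before rpm_suspend() actually runs.
 */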
1356
1357 /**
1358  * pm_runtime_init - Initialize runtime PM fields in given device object.
1359  * @dev: Device object to initialize.
1360  */
1361 void pm_runtime_init(struct device *dev)
1362 {
1363         dev->power.runtime_status = RPM_SUSPENDED;
1364         dev->power.idle_notification = false;
1365
1366         dev->power.disable_depth = 1;
1367         atomic_set(&dev->power.usage_count, 0);
1368
1369         dev->power.runtime_error = 0;
1370
1371         atomic_set(&dev->power.child_count, 0);
1372         pm_suspend_ignore_children(dev, false);
1373         dev->power.runtime_auto = true;
1374
1375         dev->power.request_pending = false;
1376         dev->power.request = RPM_REQ_NONE;
1377         dev->power.deferred_resume = false;
1378         dev->power.accounting_timestamp = jiffies;
1379         INIT_WORK(&dev->power.work, pm_runtime_work);
1380
1381         dev->power.timer_expires = 0;
1382         setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
1383                         (unsigned long)dev);
1384
1385         init_waitqueue_head(&dev->power.wait_queue);
1386 }
1387
1388 /**
1389  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1390  * @dev: Device object being removed from device hierarchy.
1391  */
1392 void pm_runtime_remove(struct device *dev)
1393 {
1394         __pm_runtime_disable(dev, false);
1395
1396         /* Change the status back to 'suspended' to match the initial status. */
1397         if (dev->power.runtime_status == RPM_ACTIVE)
1398                 pm_runtime_set_suspended(dev);
1399         if (dev->power.irq_safe && dev->parent)
1400                 pm_runtime_put(dev->parent);
1401 }
1402 #endif
1403
1404 /**
1405  * pm_runtime_force_suspend - Force a device into suspend state if needed.
1406  * @dev: Device to suspend.
1407  *
1408  * Disable runtime PM so we can safely check the device's runtime PM status
1409  * and, if it is active, invoke its ->runtime_suspend() callback to bring it
1410  * into a suspended state. Keep runtime PM disabled to preserve the state
1411  * unless we encounter errors.
1412  *
1413  * Typically this function may be invoked from a system suspend callback to
1414  * make sure the device is put into a low-power state.
1415  */
1416 int pm_runtime_force_suspend(struct device *dev)
1417 {
1418         int (*callback)(struct device *);
1419         int ret = 0;
1420
1421         pm_runtime_disable(dev);
1422
1423         /*
1424          * Note that pm_runtime_status_suspended() returns false when
1425          * CONFIG_PM_RUNTIME is unset, which means the device will be put
1426          * into a low-power state in that case.
1427          */
1428         if (pm_runtime_status_suspended(dev))
1429                 return 0;
1430
1431         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1432
1433         if (!callback) {
1434                 ret = -ENOSYS;
1435                 goto err;
1436         }
1437
1438         ret = callback(dev);
1439         if (ret)
1440                 goto err;
1441
1442         pm_runtime_set_suspended(dev);
1443         return 0;
1444 err:
1445         pm_runtime_enable(dev);
1446         return ret;
1447 }
1448 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1449
1450 /**
1451  * pm_runtime_force_resume - Force a device into resume state.
1452  * @dev: Device to resume.
1453  *
1454  * Prior to invoking this function we expect the user to have brought the
1455  * device into a low-power state by a call to pm_runtime_force_suspend().
1456  * Here we reverse those actions and bring the device back to full power.
1457  * We also update the runtime PM status and re-enable runtime PM.
1458  *
1459  * Typically this function may be invoked from a system resume callback to
1460  * make sure the device is brought back to full power.
1461  */
1462 int pm_runtime_force_resume(struct device *dev)
1463 {
1464         int (*callback)(struct device *);
1465         int ret = 0;
1466
1467         callback = RPM_GET_CALLBACK(dev, runtime_resume);
1468
1469         if (!callback) {
1470                 ret = -ENOSYS;
1471                 goto out;
1472         }
1473
1474         ret = callback(dev);
1475         if (ret)
1476                 goto out;
1477
1478         pm_runtime_set_active(dev);
1479         pm_runtime_mark_last_busy(dev);
1480 out:
1481         pm_runtime_enable(dev);
1482         return ret;
1483 }
1484 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
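
/*
 * pm_runtime_force_suspend() and pm_runtime_force_resume() are meant to be
 * plugged into a driver's system sleep callbacks, so that runtime PM performs
 * the actual power transitions.  A hypothetical driver's dev_pm_ops might
 * look like this (sketch; foo_runtime_suspend()/foo_runtime_resume() stand
 * for the driver's own runtime PM callbacks):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 */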