/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        }                                                       \
        __ret;                                                  \
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})
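
/*
 * Illustrative note (an editorial assumption, not part of the original
 * file): with a platform-supplied genpd->dev_ops.stop callback in place,
 * a wrapper such as genpd_stop_dev() below effectively reduces to
 *
 *      ret = genpd->dev_ops.stop(dev);
 *
 * while also bumping the device's td->stop_latency_ns whenever the
 * measured time exceeds the previous worst case, and flagging the domain
 * for governor re-evaluation via max_off_time_changed.
 */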

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        if (IS_ERR_OR_NULL(domain_name))
                return NULL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (!strcmp(gpd->name, domain_name)) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);
        return genpd;
}

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
                                        stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
                                        start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        bool ret = false;

        if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
                ret = !!atomic_dec_and_test(&genpd->sd_count);

        return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);
        /*
         * Wait for the domain to transition into either the active,
         * or the power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
        if (genpd->resume_count == 0)
                genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
        s64 usecs64;

        if (!genpd->cpu_data)
                return;

        usecs64 = genpd->power_on_latency_ns;
        do_div(usecs64, NSEC_PER_USEC);
        usecs64 += genpd->cpu_data->saved_exit_latency;
        genpd->cpu_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct gpd_link *link;
        DEFINE_WAIT(wait);
        int ret = 0;

        /* If the domain's master is being waited for, we have to wait too. */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status != GPD_STATE_WAIT_MASTER)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);

        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                return 0;

        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                return 0;
        }

        if (genpd->cpu_data) {
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = true;
                cpuidle_resume_and_unlock();
                goto out;
        }

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the masters' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_inc(link->master);
                genpd->status = GPD_STATE_WAIT_MASTER;

                mutex_unlock(&genpd->lock);

                ret = pm_genpd_poweron(link->master);

                mutex_lock(&genpd->lock);

                /*
                 * The "wait for parent" status is guaranteed not to change
                 * while the master is powering on.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                wake_up_all(&genpd->status_wait_queue);
                if (ret) {
                        genpd_sd_counter_dec(link->master);
                        goto err;
                }
        }

        if (genpd->power_on) {
                ktime_t time_start = ktime_get();
                s64 elapsed_ns;

                ret = genpd->power_on(genpd);
                if (ret)
                        goto err;

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_on_latency_ns) {
                        genpd->power_on_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        genpd_recalc_cpu_exit_latency(genpd);
                        if (genpd->name)
                                pr_warning("%s: Power-on latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

 out:
        genpd_set_active(genpd);

        return 0;

 err:
        list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
                genpd_sd_counter_dec(link->master);

        return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        int ret;

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        mutex_unlock(&genpd->lock);
        return ret;
}
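
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): platform code holding a pointer to its domain can restore power
 * directly, or use the name-based variant below when only the name is
 * known:
 *
 *      ret = pm_genpd_poweron(&my_pd);          (my_pd is hypothetical)
 *      if (ret)
 *              return ret;
 *
 *      ret = pm_genpd_name_poweron("MY-PD");    (the name is hypothetical)
 *
 * Both paths take genpd->lock and may sleep while walking slave_links, so
 * neither may be called from atomic context.
 */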

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
        struct generic_pm_domain *genpd;

        genpd = pm_genpd_lookup_name(domain_name);
        return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
                                     struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
                                        save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
                                        restore_state_latency_ns,
                                        "state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

        mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
        if (!dev) {
                mutex_unlock(&gpd_data->lock);
                return NOTIFY_DONE;
        }
        mutex_unlock(&gpd_data->lock);

        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd && pdd->dev) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        mutex_lock(&genpd->lock);
                        genpd->max_off_time_changed = true;
                        mutex_unlock(&genpd->lock);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int ret = 0;

        if (gpd_data->need_restore)
                return 0;

        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        ret = genpd_save_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        if (!ret)
                gpd_data->need_restore = true;

        return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        bool need_restore = gpd_data->need_restore;

        gpd_data->need_restore = false;
        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        if (need_restore)
                genpd_restore_dev(genpd, dev);

        mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_WAIT_MASTER
                || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) The domain is waiting for its master to power up.
         * (3) One of the domain's devices is being resumed right now.
         * (4) System suspend is in progress.
         */
        if (genpd->status == GPD_STATE_POWER_OFF
            || genpd->status == GPD_STATE_WAIT_MASTER
            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                return 0;

        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                enum pm_qos_flags_status stat;

                stat = dev_pm_qos_flags(pdd->dev,
                                        PM_QOS_FLAG_NO_POWER_OFF
                                                | PM_QOS_FLAG_REMOTE_WAKEUP);
                if (stat > PM_QOS_FLAGS_NONE)
                        return -EBUSY;

                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
                    || pdd->dev->power.irq_safe))
                        not_suspended++;
        }

        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
                ret = atomic_read(&genpd->sd_count) == 0 ?
                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        if (genpd->cpu_data) {
                /*
                 * If cpu_data is set, cpuidle should turn the domain off when
                 * the CPU in it is idle.  In that case we don't decrement the
                 * subdomain counts of the master domains, so that power is not
                 * removed from the current domain prematurely as a result of
                 * cutting off the masters' power.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = false;
                cpuidle_resume_and_unlock();
                goto out;
        }

        if (genpd->power_off) {
                ktime_t time_start;
                s64 elapsed_ns;

                if (atomic_read(&genpd->sd_count) > 0) {
                        ret = -EBUSY;
                        goto out;
                }

                time_start = ktime_get();

                /*
                 * If sd_count > 0 at this point, one of the subdomains hasn't
                 * managed to call pm_genpd_poweron() for the master yet after
                 * incrementing it.  In that case pm_genpd_poweron() will wait
                 * for us to drop the lock, so we can call .power_off() and let
                 * the pm_genpd_poweron() restore power for us (this shouldn't
                 * happen very often).
                 */
                ret = genpd->power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
                }

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_off_latency_ns) {
                        genpd->power_off_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        if (genpd->name)
                                pr_warning("%s: Power-off latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                genpd_queue_power_off_work(link->master);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_acquire_lock(genpd);
        pm_genpd_poweroff(genpd);
        genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool (*stop_ok)(struct device *__dev);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;

        ret = genpd_stop_dev(genpd, dev);
        if (ret)
                return ret;

        /*
         * If power.irq_safe is set, this routine will be run with interrupts
         * off, so it can't use mutexes.
         */
        if (dev->power.irq_safe)
                return 0;

        mutex_lock(&genpd->lock);
        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
                return genpd_start_dev_no_timing(genpd, dev);

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        if (ret) {
                mutex_unlock(&genpd->lock);
                return ret;
        }
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

        return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
        pd_ignore_unused = true;
        return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
        struct generic_pm_domain *genpd;

        if (pd_ignore_unused) {
                pr_warn("genpd: Not disabling unused power domains\n");
                return;
        }

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);
}
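
/*
 * Typical usage (a sketch based on how such hooks are commonly wired up,
 * not part of the original file): a platform calls this once all drivers
 * have had a chance to probe, e.g. from a late initcall:
 *
 *      static int __init my_pd_late_init(void)
 *      {
 *              pm_genpd_poweroff_unused();
 *              return 0;
 *      }
 *      late_initcall(my_pd_late_init);
 *
 * Booting with "pd_ignore_unused" on the kernel command line turns the
 * call into a no-op, which helps when debugging a domain that is wrongly
 * considered unused.
 */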

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                            unsigned long val, void *ptr)
{
        return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend        NULL
#define pm_genpd_runtime_resume         NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *gpd;

        if (IS_ERR_OR_NULL(genpd))
                return false;

        list_for_each_entry(gpd, &gpd_list, gpd_list_node)
                if (gpd == genpd)
                        return true;

        return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                pm_genpd_sync_poweroff(link->master);
        }
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status != GPD_STATE_POWER_OFF)
                return;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                pm_genpd_sync_poweron(link->master);
                genpd_sd_counter_inc(link->master);
        }

        if (genpd->power_on)
                genpd->power_on(genpd);

        genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
        bool active_wakeup;

        if (!device_can_wakeup(dev))
                return false;

        active_wakeup = genpd_dev_active_wakeup(genpd, dev);
        return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put(dev);
        return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        genpd_stop_dev(genpd, dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into arbitrary state,
                 * so make it appear as powered off to pm_genpd_sync_poweron(),
                 * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
                        /*
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
                        if (genpd->power_off)
                                genpd->power_off(genpd);

                        return 0;
                }
        }

        if (genpd->suspend_power_off)
                return 0;

        pm_genpd_sync_poweron(genpd);

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_request_idle(dev);
        }
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
        struct generic_pm_domain *genpd;

        genpd = dev_to_genpd(dev);
        if (!pm_genpd_present(genpd))
                return;

        if (suspend) {
                genpd->suspended_count++;
                pm_genpd_sync_poweroff(genpd);
        } else {
                pm_genpd_sync_poweron(genpd);
                genpd->suspended_count--;
        }
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
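
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): an "always on" timer device handled in the syscore phase could
 * drop power to its domain after timekeeping has been suspended and
 * restore it on the way back up:
 *
 *      static int my_timer_syscore_suspend(void)
 *      {
 *              pm_genpd_syscore_switch(my_timer_dev, true);
 *              return 0;
 *      }
 *
 *      static void my_timer_syscore_resume(void)
 *      {
 *              pm_genpd_syscore_switch(my_timer_dev, false);
 *      }
 *
 * where my_timer_dev is a hypothetical struct device pointer and the two
 * callbacks are registered via struct syscore_ops.
 */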

#else

#define pm_genpd_prepare                NULL
#define pm_genpd_suspend                NULL
#define pm_genpd_suspend_late           NULL
#define pm_genpd_suspend_noirq          NULL
#define pm_genpd_resume_early           NULL
#define pm_genpd_resume_noirq           NULL
#define pm_genpd_resume                 NULL
#define pm_genpd_freeze                 NULL
#define pm_genpd_freeze_late            NULL
#define pm_genpd_freeze_noirq           NULL
#define pm_genpd_thaw_early             NULL
#define pm_genpd_thaw_noirq             NULL
#define pm_genpd_thaw                   NULL
#define pm_genpd_restore_noirq          NULL
#define pm_genpd_complete               NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;

        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
        if (!gpd_data)
                return NULL;

        mutex_init(&gpd_data->lock);
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
        dev_pm_qos_add_notifier(dev, &gpd_data->nb);
        return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
                                     struct generic_pm_domain_data *gpd_data)
{
        dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
        kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                          struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
        struct pm_domain_data *pdd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        gpd_data_new = __pm_genpd_alloc_dev_data(dev);
        if (!gpd_data_new)
                return -ENOMEM;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev == dev) {
                        ret = -EINVAL;
                        goto out;
                }

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                goto out;

        genpd->device_count++;
        genpd->max_off_time_changed = true;

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = &genpd->domain;
        if (dev->power.subsys_data->domain_data) {
                gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        } else {
                gpd_data = gpd_data_new;
                dev->power.subsys_data->domain_data = &gpd_data->base;
        }
        gpd_data->refcount++;
        if (td)
                gpd_data->td = *td;

        spin_unlock_irq(&dev->power.lock);

        mutex_lock(&gpd_data->lock);
        gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
        gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        mutex_unlock(&gpd_data->lock);

 out:
        genpd_release_lock(genpd);

        if (gpd_data != gpd_data_new)
                __pm_genpd_free_dev_data(dev, gpd_data_new);

        return ret;
}
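
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): platform setup code typically attaches its devices right after
 * registering the domain, passing td == NULL so the timing data starts
 * out zeroed and the latencies are learned at run time by the timed
 * callbacks above:
 *
 *      ret = __pm_genpd_add_device(&my_pd, &my_pdev.dev, NULL);
 *
 * my_pd and my_pdev are hypothetical; the domain must have been
 * initialized (see pm_genpd_init()) before any devices are added.
 */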

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing a PM domain to which
 *   the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
                             struct gpd_timing_data *td)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (gpd->of_node == genpd_node) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);

        if (!genpd)
                return -EINVAL;

        return __pm_genpd_add_device(genpd, dev, td);
}

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
                               struct gpd_timing_data *td)
{
        return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                           struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;
        struct pm_domain_data *pdd;
        bool remove = false;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
            ||  IS_ERR_OR_NULL(dev->pm_domain)
            ||  pd_to_genpd(dev->pm_domain) != genpd)
                return -EINVAL;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        genpd->device_count--;
        genpd->max_off_time_changed = true;

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = NULL;
        pdd = dev->power.subsys_data->domain_data;
        list_del_init(&pdd->list_node);
        gpd_data = to_gpd_data(pdd);
        if (--gpd_data->refcount == 0) {
                dev->power.subsys_data->domain_data = NULL;
                remove = true;
        }

        spin_unlock_irq(&dev->power.lock);

        mutex_lock(&gpd_data->lock);
        pdd->dev = NULL;
        mutex_unlock(&gpd_data->lock);

        genpd_release_lock(genpd);

        dev_pm_put_subsys_data(dev);
        if (remove)
                __pm_genpd_free_dev_data(dev, gpd_data);

        return 0;

 out:
        genpd_release_lock(genpd);

        return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
        struct pm_subsys_data *psd;
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        psd = dev_to_psd(dev);
        if (psd && psd->domain_data)
                to_gpd_data(psd->domain_data)->need_restore = val;

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
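
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): a driver that detects its hardware context was lost while the
 * domain was off can force the restore path to run on the next resume:
 *
 *      pm_genpd_dev_need_restore(dev, true);
 *
 * after which __pm_genpd_restore_device() will invoke the domain's
 * restore_state callback for the device.
 */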
1563
1564 /**
1565  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1566  * @genpd: Master PM domain to add the subdomain to.
1567  * @subdomain: Subdomain to be added.
1568  */
1569 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1570                            struct generic_pm_domain *subdomain)
1571 {
1572         struct gpd_link *link;
1573         int ret = 0;
1574
1575         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1576             || genpd == subdomain)
1577                 return -EINVAL;
1578
1579  start:
1580         genpd_acquire_lock(genpd);
1581         mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1582
1583         if (subdomain->status != GPD_STATE_POWER_OFF
1584             && subdomain->status != GPD_STATE_ACTIVE) {
1585                 mutex_unlock(&subdomain->lock);
1586                 genpd_release_lock(genpd);
1587                 goto start;
1588         }
1589
1590         if (genpd->status == GPD_STATE_POWER_OFF
1591             &&  subdomain->status != GPD_STATE_POWER_OFF) {
1592                 ret = -EINVAL;
1593                 goto out;
1594         }
1595
1596         list_for_each_entry(link, &genpd->master_links, master_node) {
1597                 if (link->slave == subdomain && link->master == genpd) {
1598                         ret = -EINVAL;
1599                         goto out;
1600                 }
1601         }
1602
1603         link = kzalloc(sizeof(*link), GFP_KERNEL);
1604         if (!link) {
1605                 ret = -ENOMEM;
1606                 goto out;
1607         }
1608         link->master = genpd;
1609         list_add_tail(&link->master_node, &genpd->master_links);
1610         link->slave = subdomain;
1611         list_add_tail(&link->slave_node, &subdomain->slave_links);
1612         if (subdomain->status != GPD_STATE_POWER_OFF)
1613                 genpd_sd_counter_inc(genpd);
1614
1615  out:
1616         mutex_unlock(&subdomain->lock);
1617         genpd_release_lock(genpd);
1618
1619         return ret;
1620 }
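
/*
 * Illustrative sketch, not part of the original file: linking two
 * hypothetical domains so that the master cannot power off while the
 * subdomain is still active; the sd_count taken above is what enforces
 * that ordering.
 */
static int __maybe_unused foo_link_domains(struct generic_pm_domain *core_pd,
                                           struct generic_pm_domain *gfx_pd)
{
        /* core_pd becomes the master, gfx_pd its subdomain (slave). */
        return pm_genpd_add_subdomain(core_pd, gfx_pd);
}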
1621
1622 /**
1623  * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain by name.
1624  * @master_name: Name of the master PM domain to add the subdomain to.
1625  * @subdomain_name: Name of the subdomain to be added.
1626  */
1627 int pm_genpd_add_subdomain_names(const char *master_name,
1628                                  const char *subdomain_name)
1629 {
1630         struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1631
1632         if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1633                 return -EINVAL;
1634
1635         mutex_lock(&gpd_list_lock);
1636         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1637                 if (!master && !strcmp(gpd->name, master_name))
1638                         master = gpd;
1639
1640                 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1641                         subdomain = gpd;
1642
1643                 if (master && subdomain)
1644                         break;
1645         }
1646         mutex_unlock(&gpd_list_lock);
1647
1648         return pm_genpd_add_subdomain(master, subdomain);
1649 }
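
/*
 * Illustrative sketch, not part of the original file: the same link as in
 * the previous example, but with both domains looked up by the names they
 * were registered under ("foo-core-pd" and "foo-gfx-pd" are hypothetical).
 */
static int __maybe_unused foo_link_domains_by_name(void)
{
        return pm_genpd_add_subdomain_names("foo-core-pd", "foo-gfx-pd");
}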
1650
1651 /**
1652  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1653  * @genpd: Master PM domain to remove the subdomain from.
1654  * @subdomain: Subdomain to be removed.
1655  */
1656 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1657                               struct generic_pm_domain *subdomain)
1658 {
1659         struct gpd_link *link;
1660         int ret = -EINVAL;
1661
1662         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1663                 return -EINVAL;
1664
1665  start:
1666         genpd_acquire_lock(genpd);
1667
1668         list_for_each_entry(link, &genpd->master_links, master_node) {
1669                 if (link->slave != subdomain)
1670                         continue;
1671
1672                 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1673
1674                 if (subdomain->status != GPD_STATE_POWER_OFF
1675                     && subdomain->status != GPD_STATE_ACTIVE) {
1676                         mutex_unlock(&subdomain->lock);
1677                         genpd_release_lock(genpd);
1678                         goto start;
1679                 }
1680
1681                 list_del(&link->master_node);
1682                 list_del(&link->slave_node);
1683                 kfree(link);
1684                 if (subdomain->status != GPD_STATE_POWER_OFF)
1685                         genpd_sd_counter_dec(genpd);
1686
1687                 mutex_unlock(&subdomain->lock);
1688
1689                 ret = 0;
1690                 break;
1691         }
1692
1693         genpd_release_lock(genpd);
1694
1695         return ret;
1696 }
1697
1698 /**
1699  * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1700  * @genpd: PM domain to be connected with cpuidle.
1701  * @state: cpuidle state this domain can disable/enable.
1702  *
1703  * Make a PM domain behave as though it contained a CPU core; that is, instead
1704  * of calling its power-down routine it will enable the given cpuidle state so
1705  * that the cpuidle subsystem can power the domain down (if possible and desirable).
1706  */
1707 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1708 {
1709         struct cpuidle_driver *cpuidle_drv;
1710         struct gpd_cpu_data *cpu_data;
1711         struct cpuidle_state *idle_state;
1712         int ret = 0;
1713
1714         if (IS_ERR_OR_NULL(genpd) || state < 0)
1715                 return -EINVAL;
1716
1717         genpd_acquire_lock(genpd);
1718
1719         if (genpd->cpu_data) {
1720                 ret = -EEXIST;
1721                 goto out;
1722         }
1723         cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
1724         if (!cpu_data) {
1725                 ret = -ENOMEM;
1726                 goto out;
1727         }
1728         cpuidle_drv = cpuidle_driver_ref();
1729         if (!cpuidle_drv) {
1730                 ret = -ENODEV;
1731                 goto err_drv;
1732         }
1733         if (cpuidle_drv->state_count <= state) {
1734                 ret = -EINVAL;
1735                 goto err;
1736         }
1737         idle_state = &cpuidle_drv->states[state];
1738         if (!idle_state->disabled) {
1739                 ret = -EAGAIN;
1740                 goto err;
1741         }
1742         cpu_data->idle_state = idle_state;
1743         cpu_data->saved_exit_latency = idle_state->exit_latency;
1744         genpd->cpu_data = cpu_data;
1745         genpd_recalc_cpu_exit_latency(genpd);
1746
1747  out:
1748         genpd_release_lock(genpd);
1749         return ret;
1750
1751  err:
1752         cpuidle_driver_unref();
1753
1754  err_drv:
1755         kfree(cpu_data);
1756         goto out;
1757 }
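
/*
 * Illustrative sketch, not part of the original file: coupling a
 * hypothetical CPU domain to cpuidle state 1.  The state must currently be
 * disabled (otherwise -EAGAIN is returned) and must exist in the registered
 * cpuidle driver.
 */
static int __maybe_unused foo_couple_cpu_domain(struct generic_pm_domain *pd)
{
        int ret = pm_genpd_attach_cpuidle(pd, 1);

        if (ret)
                pr_warn("%s: cpuidle attach failed: %d\n", pd->name, ret);
        return ret;
}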
1758
1759 /**
1760  * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1761  * @name: Name of the domain to connect to cpuidle.
1762  * @state: cpuidle state this domain can manipulate.
1763  */
1764 int pm_genpd_name_attach_cpuidle(const char *name, int state)
1765 {
1766         return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1767 }
1768
1769 /**
1770  * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1771  * @genpd: PM domain to remove the cpuidle connection from.
1772  *
1773  * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1774  * given PM domain.
1775  */
1776 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1777 {
1778         struct gpd_cpu_data *cpu_data;
1779         struct cpuidle_state *idle_state;
1780         int ret = 0;
1781
1782         if (IS_ERR_OR_NULL(genpd))
1783                 return -EINVAL;
1784
1785         genpd_acquire_lock(genpd);
1786
1787         cpu_data = genpd->cpu_data;
1788         if (!cpu_data) {
1789                 ret = -ENODEV;
1790                 goto out;
1791         }
1792         idle_state = cpu_data->idle_state;
1793         if (!idle_state->disabled) {
1794                 ret = -EAGAIN;
1795                 goto out;
1796         }
1797         idle_state->exit_latency = cpu_data->saved_exit_latency;
1798         cpuidle_driver_unref();
1799         genpd->cpu_data = NULL;
1800         kfree(cpu_data);
1801
1802  out:
1803         genpd_release_lock(genpd);
1804         return ret;
1805 }
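
/*
 * Illustrative sketch, not part of the original file: the inverse of the
 * attach example above; note it fails with -EAGAIN until the idle state has
 * been disabled again.
 */
static int __maybe_unused foo_decouple_cpu_domain(struct generic_pm_domain *pd)
{
        return pm_genpd_detach_cpuidle(pd);
}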
1806
1807 /**
1808  * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1809  * @name: Name of the domain to disconnect cpuidle from.
1810  */
1811 int pm_genpd_name_detach_cpuidle(const char *name)
1812 {
1813         return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1814 }
1815
1816 /* Default device callbacks for generic PM domains. */
1817
1818 /**
1819  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1820  * @dev: Device to handle.
1821  */
1822 static int pm_genpd_default_save_state(struct device *dev)
1823 {
1824         int (*cb)(struct device *__dev);
1825
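        /*
         * Prefer the subsystem-level runtime_suspend callback: device type
         * first, then class, then bus; fall back to the driver's callback
         * only if no subsystem provides one.
         */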
1826         if (dev->type && dev->type->pm)
1827                 cb = dev->type->pm->runtime_suspend;
1828         else if (dev->class && dev->class->pm)
1829                 cb = dev->class->pm->runtime_suspend;
1830         else if (dev->bus && dev->bus->pm)
1831                 cb = dev->bus->pm->runtime_suspend;
1832         else
1833                 cb = NULL;
1834
1835         if (!cb && dev->driver && dev->driver->pm)
1836                 cb = dev->driver->pm->runtime_suspend;
1837
1838         return cb ? cb(dev) : 0;
1839 }
1840
1841 /**
1842  * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1843  * @dev: Device to handle.
1844  */
1845 static int pm_genpd_default_restore_state(struct device *dev)
1846 {
1847         int (*cb)(struct device *__dev);
1848
1849         if (dev->type && dev->type->pm)
1850                 cb = dev->type->pm->runtime_resume;
1851         else if (dev->class && dev->class->pm)
1852                 cb = dev->class->pm->runtime_resume;
1853         else if (dev->bus && dev->bus->pm)
1854                 cb = dev->bus->pm->runtime_resume;
1855         else
1856                 cb = NULL;
1857
1858         if (!cb && dev->driver && dev->driver->pm)
1859                 cb = dev->driver->pm->runtime_resume;
1860
1861         return cb ? cb(dev) : 0;
1862 }
1863
1864 /**
1865  * pm_genpd_init - Initialize a generic I/O PM domain object.
1866  * @genpd: PM domain object to initialize.
1867  * @gov: PM domain governor to associate with the domain (may be NULL).
1868  * @is_off: Initial state of the domain, true if the domain is powered off.
1869  */
1870 void pm_genpd_init(struct generic_pm_domain *genpd,
1871                    struct dev_power_governor *gov, bool is_off)
1872 {
1873         if (IS_ERR_OR_NULL(genpd))
1874                 return;
1875
1876         INIT_LIST_HEAD(&genpd->master_links);
1877         INIT_LIST_HEAD(&genpd->slave_links);
1878         INIT_LIST_HEAD(&genpd->dev_list);
1879         mutex_init(&genpd->lock);
1880         genpd->gov = gov;
1881         INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1882         genpd->in_progress = 0;
1883         atomic_set(&genpd->sd_count, 0);
1884         genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1885         init_waitqueue_head(&genpd->status_wait_queue);
1886         genpd->poweroff_task = NULL;
1887         genpd->resume_count = 0;
1888         genpd->device_count = 0;
1889         genpd->max_off_time_ns = -1;
1890         genpd->max_off_time_changed = true;
1891         genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1892         genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1893         genpd->domain.ops.prepare = pm_genpd_prepare;
1894         genpd->domain.ops.suspend = pm_genpd_suspend;
1895         genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1896         genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1897         genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1898         genpd->domain.ops.resume_early = pm_genpd_resume_early;
1899         genpd->domain.ops.resume = pm_genpd_resume;
1900         genpd->domain.ops.freeze = pm_genpd_freeze;
1901         genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1902         genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1903         genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1904         genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1905         genpd->domain.ops.thaw = pm_genpd_thaw;
1906         genpd->domain.ops.poweroff = pm_genpd_suspend;
1907         genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1908         genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1909         genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1910         genpd->domain.ops.restore_early = pm_genpd_resume_early;
1911         genpd->domain.ops.restore = pm_genpd_resume;
1912         genpd->domain.ops.complete = pm_genpd_complete;
1913         genpd->dev_ops.save_state = pm_genpd_default_save_state;
1914         genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1915         mutex_lock(&gpd_list_lock);
1916         list_add(&genpd->gpd_list_node, &gpd_list);
1917         mutex_unlock(&gpd_list_lock);
1918 }
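
/*
 * Illustrative sketch, not part of the original file: a minimal platform
 * domain built on the defaults installed above.  All "foo" identifiers are
 * hypothetical; the domain is registered with no governor and starts
 * powered off.
 */
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
        /* Enable the hypothetical power island here. */
        return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
        /* Disable the hypothetical power island here. */
        return 0;
}

static struct generic_pm_domain foo_pd = {
        .name = "foo-pd",
        .power_on = foo_pd_power_on,
        .power_off = foo_pd_power_off,
};

static void __maybe_unused foo_pd_register(void)
{
        pm_genpd_init(&foo_pd, NULL, true);
}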