2 * Generic OPP Interface
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/regulator/consumer.h>
28 * The root of the list of all devices. All device_opp structures branch off
29 * from here, with each device_opp containing the list of opp it supports in
30 * various states of availability.
32 static LIST_HEAD(dev_opp_list);
33 /* Lock to allow exclusive modification to the device and opp lists */
34 DEFINE_MUTEX(dev_opp_list_lock);
36 #define opp_rcu_lockdep_assert() \
38 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
39 !lockdep_is_held(&dev_opp_list_lock), \
40 "Missing rcu_read_lock() or " \
41 "dev_opp_list_lock protection"); \
44 static struct device_list_opp *_find_list_dev(const struct device *dev,
45 struct device_opp *dev_opp)
47 struct device_list_opp *list_dev;
49 list_for_each_entry(list_dev, &dev_opp->dev_list, node)
50 if (list_dev->dev == dev)
56 static struct device_opp *_managed_opp(const struct device_node *np)
58 struct device_opp *dev_opp;
60 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
61 if (dev_opp->np == np) {
63 * Multiple devices can point to the same OPP table and
64 * so will have same node-pointer, np.
66 * But the OPPs will be considered as shared only if the
67 * OPP table contains a "opp-shared" property.
69 return dev_opp->shared_opp ? dev_opp : NULL;
77 * _find_device_opp() - find device_opp struct using device pointer
78 * @dev: device pointer used to lookup device OPPs
80 * Search list of device OPPs for one containing matching device. Does a RCU
81 * reader operation to grab the pointer needed.
83 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
84 * -EINVAL based on type of error.
86 * Locking: For readers, this function must be called under rcu_read_lock().
87 * device_opp is a RCU protected pointer, which means that device_opp is valid
88 * as long as we are under RCU lock.
90 * For Writers, this function must be called with dev_opp_list_lock held.
92 struct device_opp *_find_device_opp(struct device *dev)
94 struct device_opp *dev_opp;
96 opp_rcu_lockdep_assert();
98 if (IS_ERR_OR_NULL(dev)) {
99 pr_err("%s: Invalid parameters\n", __func__);
100 return ERR_PTR(-EINVAL);
103 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
104 if (_find_list_dev(dev, dev_opp))
107 return ERR_PTR(-ENODEV);
111 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
112 * @opp: opp for which voltage has to be returned for
114 * Return: voltage in micro volt corresponding to the opp, else
117 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
118 * protected pointer. This means that opp which could have been fetched by
119 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
120 * under RCU lock. The pointer returned by the opp_find_freq family must be
121 * used in the same section as the usage of this function with the pointer
122 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
125 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
127 struct dev_pm_opp *tmp_opp;
130 opp_rcu_lockdep_assert();
132 tmp_opp = rcu_dereference(opp);
133 if (IS_ERR_OR_NULL(tmp_opp))
134 pr_err("%s: Invalid parameters\n", __func__);
140 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
143 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
144 * @opp: opp for which frequency has to be returned for
146 * Return: frequency in hertz corresponding to the opp, else
149 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
150 * protected pointer. This means that opp which could have been fetched by
151 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
152 * under RCU lock. The pointer returned by the opp_find_freq family must be
153 * used in the same section as the usage of this function with the pointer
154 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
157 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
159 struct dev_pm_opp *tmp_opp;
162 opp_rcu_lockdep_assert();
164 tmp_opp = rcu_dereference(opp);
165 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
166 pr_err("%s: Invalid parameters\n", __func__);
172 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
175 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
176 * @opp: opp for which turbo mode is being verified
178 * Turbo OPPs are not for normal use, and can be enabled (under certain
179 * conditions) for short duration of times to finish high throughput work
180 * quickly. Running on them for longer times may overheat the chip.
182 * Return: true if opp is turbo opp, else false.
184 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
185 * protected pointer. This means that opp which could have been fetched by
186 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
187 * under RCU lock. The pointer returned by the opp_find_freq family must be
188 * used in the same section as the usage of this function with the pointer
189 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
192 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
194 struct dev_pm_opp *tmp_opp;
196 opp_rcu_lockdep_assert();
198 tmp_opp = rcu_dereference(opp);
199 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
200 pr_err("%s: Invalid parameters\n", __func__);
204 return tmp_opp->turbo;
206 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
209 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
210 * @dev: device for which we do this operation
212 * Return: This function returns the max clock latency in nanoseconds.
214 * Locking: This function takes rcu_read_lock().
216 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
218 struct device_opp *dev_opp;
219 unsigned long clock_latency_ns;
223 dev_opp = _find_device_opp(dev);
225 clock_latency_ns = 0;
227 clock_latency_ns = dev_opp->clock_latency_ns_max;
230 return clock_latency_ns;
232 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
235 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
236 * @dev: device for which we do this operation
238 * Return: This function returns the max voltage latency in nanoseconds.
240 * Locking: This function takes rcu_read_lock().
242 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
244 struct device_opp *dev_opp;
245 struct dev_pm_opp *opp;
246 struct regulator *reg;
247 unsigned long latency_ns = 0;
248 unsigned long min_uV = ~0, max_uV = 0;
253 dev_opp = _find_device_opp(dev);
254 if (IS_ERR(dev_opp)) {
259 reg = dev_opp->regulator;
260 if (IS_ERR_OR_NULL(reg)) {
261 /* Regulator may not be required for device */
263 dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
269 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
273 if (opp->u_volt_min < min_uV)
274 min_uV = opp->u_volt_min;
275 if (opp->u_volt_max > max_uV)
276 max_uV = opp->u_volt_max;
282 * The caller needs to ensure that dev_opp (and hence the regulator)
283 * isn't freed, while we are executing this routine.
285 ret = regulator_set_voltage_time(reg, min_uV, max_uV);
287 latency_ns = ret * 1000;
291 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
294 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
296 * @dev: device for which we do this operation
298 * Return: This function returns the max transition latency, in nanoseconds, to
299 * switch from one OPP to other.
301 * Locking: This function takes rcu_read_lock().
303 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
305 return dev_pm_opp_get_max_volt_latency(dev) +
306 dev_pm_opp_get_max_clock_latency(dev);
308 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
311 * dev_pm_opp_get_suspend_opp() - Get suspend opp
312 * @dev: device for which we do this operation
314 * Return: This function returns pointer to the suspend opp if it is
315 * defined and available, otherwise it returns NULL.
317 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
318 * protected pointer. The reason for the same is that the opp pointer which is
319 * returned will remain valid for use with opp_get_{voltage, freq} only while
320 * under the locked area. The pointer returned must be used prior to unlocking
321 * with rcu_read_unlock() to maintain the integrity of the pointer.
323 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
325 struct device_opp *dev_opp;
327 opp_rcu_lockdep_assert();
329 dev_opp = _find_device_opp(dev);
330 if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
331 !dev_opp->suspend_opp->available)
334 return dev_opp->suspend_opp;
336 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
339 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
340 * @dev: device for which we do this operation
342 * Return: This function returns the number of available opps if there are any,
343 * else returns 0 if none or the corresponding error value.
345 * Locking: This function takes rcu_read_lock().
347 int dev_pm_opp_get_opp_count(struct device *dev)
349 struct device_opp *dev_opp;
350 struct dev_pm_opp *temp_opp;
355 dev_opp = _find_device_opp(dev);
356 if (IS_ERR(dev_opp)) {
357 count = PTR_ERR(dev_opp);
358 dev_err(dev, "%s: device OPP not found (%d)\n",
363 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
364 if (temp_opp->available)
372 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
375 * dev_pm_opp_find_freq_exact() - search for an exact frequency
376 * @dev: device for which we do this operation
377 * @freq: frequency to search for
378 * @available: true/false - match for available opp
380 * Return: Searches for exact match in the opp list and returns pointer to the
381 * matching opp if found, else returns ERR_PTR in case of error and should
382 * be handled using IS_ERR. Error return values can be:
383 * EINVAL: for bad pointer
384 * ERANGE: no match found for search
385 * ENODEV: if device not found in list of registered devices
387 * Note: available is a modifier for the search. if available=true, then the
388 * match is for exact matching frequency and is available in the stored OPP
389 * table. if false, the match is for exact frequency which is not available.
391 * This provides a mechanism to enable an opp which is not available currently
392 * or the opposite as well.
394 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
395 * protected pointer. The reason for the same is that the opp pointer which is
396 * returned will remain valid for use with opp_get_{voltage, freq} only while
397 * under the locked area. The pointer returned must be used prior to unlocking
398 * with rcu_read_unlock() to maintain the integrity of the pointer.
400 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
404 struct device_opp *dev_opp;
405 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
407 opp_rcu_lockdep_assert();
409 dev_opp = _find_device_opp(dev);
410 if (IS_ERR(dev_opp)) {
411 int r = PTR_ERR(dev_opp);
412 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
416 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
417 if (temp_opp->available == available &&
418 temp_opp->rate == freq) {
426 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
429 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
430 * @dev: device for which we do this operation
431 * @freq: Start frequency
433 * Search for the matching ceil *available* OPP from a starting freq
436 * Return: matching *opp and refreshes *freq accordingly, else returns
437 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
439 * EINVAL: for bad pointer
440 * ERANGE: no match found for search
441 * ENODEV: if device not found in list of registered devices
443 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
444 * protected pointer. The reason for the same is that the opp pointer which is
445 * returned will remain valid for use with opp_get_{voltage, freq} only while
446 * under the locked area. The pointer returned must be used prior to unlocking
447 * with rcu_read_unlock() to maintain the integrity of the pointer.
449 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
452 struct device_opp *dev_opp;
453 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
455 opp_rcu_lockdep_assert();
458 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
459 return ERR_PTR(-EINVAL);
462 dev_opp = _find_device_opp(dev);
464 return ERR_CAST(dev_opp);
466 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
467 if (temp_opp->available && temp_opp->rate >= *freq) {
476 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
479 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
480 * @dev: device for which we do this operation
481 * @freq: Start frequency
483 * Search for the matching floor *available* OPP from a starting freq
486 * Return: matching *opp and refreshes *freq accordingly, else returns
487 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
489 * EINVAL: for bad pointer
490 * ERANGE: no match found for search
491 * ENODEV: if device not found in list of registered devices
493 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
494 * protected pointer. The reason for the same is that the opp pointer which is
495 * returned will remain valid for use with opp_get_{voltage, freq} only while
496 * under the locked area. The pointer returned must be used prior to unlocking
497 * with rcu_read_unlock() to maintain the integrity of the pointer.
499 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
502 struct device_opp *dev_opp;
503 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
505 opp_rcu_lockdep_assert();
508 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
509 return ERR_PTR(-EINVAL);
512 dev_opp = _find_device_opp(dev);
514 return ERR_CAST(dev_opp);
516 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
517 if (temp_opp->available) {
518 /* go to the next node, before choosing prev */
519 if (temp_opp->rate > *freq)
530 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
533 * The caller needs to ensure that device_opp (and hence the clk) isn't freed,
534 * while clk returned here is used.
536 static struct clk *_get_opp_clk(struct device *dev)
538 struct device_opp *dev_opp;
543 dev_opp = _find_device_opp(dev);
544 if (IS_ERR(dev_opp)) {
545 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
546 clk = ERR_CAST(dev_opp);
552 dev_err(dev, "%s: No clock available for the device\n",
560 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
561 unsigned long u_volt, unsigned long u_volt_min,
562 unsigned long u_volt_max)
566 /* Regulator not available for device */
568 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
573 dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
576 ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
579 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
580 __func__, u_volt_min, u_volt, u_volt_max, ret);
586 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
587 * @dev: device for which we do this operation
588 * @target_freq: frequency to achieve
590 * This configures the power-supplies and clock source to the levels specified
591 * by the OPP corresponding to the target_freq.
593 * Locking: This function takes rcu_read_lock().
595 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
597 struct device_opp *dev_opp;
598 struct dev_pm_opp *old_opp, *opp;
599 struct regulator *reg;
601 unsigned long freq, old_freq;
602 unsigned long u_volt, u_volt_min, u_volt_max;
603 unsigned long ou_volt, ou_volt_min, ou_volt_max;
606 if (unlikely(!target_freq)) {
607 dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
612 clk = _get_opp_clk(dev);
616 freq = clk_round_rate(clk, target_freq);
620 old_freq = clk_get_rate(clk);
622 /* Return early if nothing to do */
623 if (old_freq == freq) {
624 dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
631 dev_opp = _find_device_opp(dev);
632 if (IS_ERR(dev_opp)) {
633 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
635 return PTR_ERR(dev_opp);
638 old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
639 if (!IS_ERR(old_opp)) {
640 ou_volt = old_opp->u_volt;
641 ou_volt_min = old_opp->u_volt_min;
642 ou_volt_max = old_opp->u_volt_max;
644 dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
645 __func__, old_freq, PTR_ERR(old_opp));
648 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
651 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
652 __func__, freq, ret);
657 u_volt = opp->u_volt;
658 u_volt_min = opp->u_volt_min;
659 u_volt_max = opp->u_volt_max;
661 reg = dev_opp->regulator;
665 /* Scaling up? Scale voltage before frequency */
666 if (freq > old_freq) {
667 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
670 goto restore_voltage;
673 /* Change frequency */
675 dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
676 __func__, old_freq, freq);
678 ret = clk_set_rate(clk, freq);
680 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
682 goto restore_voltage;
685 /* Scaling down? Scale voltage after frequency */
686 if (freq < old_freq) {
687 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
696 if (clk_set_rate(clk, old_freq))
697 dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
700 /* This shouldn't harm even if the voltages weren't updated earlier */
701 if (!IS_ERR(old_opp))
702 _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
706 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
708 /* List-dev Helpers */
709 static void _kfree_list_dev_rcu(struct rcu_head *head)
711 struct device_list_opp *list_dev;
713 list_dev = container_of(head, struct device_list_opp, rcu_head);
714 kfree_rcu(list_dev, rcu_head);
717 static void _remove_list_dev(struct device_list_opp *list_dev,
718 struct device_opp *dev_opp)
720 opp_debug_unregister(list_dev, dev_opp);
721 list_del(&list_dev->node);
722 call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
723 _kfree_list_dev_rcu);
726 struct device_list_opp *_add_list_dev(const struct device *dev,
727 struct device_opp *dev_opp)
729 struct device_list_opp *list_dev;
732 list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
736 /* Initialize list-dev */
738 list_add_rcu(&list_dev->node, &dev_opp->dev_list);
740 /* Create debugfs entries for the dev_opp */
741 ret = opp_debug_register(list_dev, dev_opp);
743 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
750 * _add_device_opp() - Find device OPP table or allocate a new one
751 * @dev: device for which we do this operation
753 * It tries to find an existing table first, if it couldn't find one, it
754 * allocates a new OPP table and returns that.
756 * Return: valid device_opp pointer if success, else NULL.
758 static struct device_opp *_add_device_opp(struct device *dev)
760 struct device_opp *dev_opp;
761 struct device_list_opp *list_dev;
762 struct device_node *np;
765 /* Check for existing list for 'dev' first */
766 dev_opp = _find_device_opp(dev);
767 if (!IS_ERR(dev_opp))
771 * Allocate a new device OPP table. In the infrequent case where a new
772 * device is needed to be added, we pay this penalty.
774 dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
778 INIT_LIST_HEAD(&dev_opp->dev_list);
780 list_dev = _add_list_dev(dev, dev_opp);
787 * Only required for backward compatibility with v1 bindings, but isn't
788 * harmful for other cases. And so we do it unconditionally.
790 np = of_node_get(dev->of_node);
794 if (!of_property_read_u32(np, "clock-latency", &val))
795 dev_opp->clock_latency_ns_max = val;
796 of_property_read_u32(np, "voltage-tolerance",
797 &dev_opp->voltage_tolerance_v1);
801 /* Find clk for the device */
802 dev_opp->clk = clk_get(dev, NULL);
803 if (IS_ERR(dev_opp->clk)) {
804 ret = PTR_ERR(dev_opp->clk);
805 if (ret != -EPROBE_DEFER)
806 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
810 srcu_init_notifier_head(&dev_opp->srcu_head);
811 INIT_LIST_HEAD(&dev_opp->opp_list);
813 /* Secure the device list modification */
814 list_add_rcu(&dev_opp->node, &dev_opp_list);
819 * _kfree_device_rcu() - Free device_opp RCU handler
822 static void _kfree_device_rcu(struct rcu_head *head)
824 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
826 kfree_rcu(device_opp, rcu_head);
830 * _remove_device_opp() - Removes a device OPP table
831 * @dev_opp: device OPP table to be removed.
833 * Removes/frees device OPP table it it doesn't contain any OPPs.
835 static void _remove_device_opp(struct device_opp *dev_opp)
837 struct device_list_opp *list_dev;
839 if (!list_empty(&dev_opp->opp_list))
842 if (dev_opp->supported_hw)
845 if (dev_opp->prop_name)
848 if (!IS_ERR_OR_NULL(dev_opp->regulator))
852 if (!IS_ERR(dev_opp->clk))
853 clk_put(dev_opp->clk);
855 list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
858 _remove_list_dev(list_dev, dev_opp);
860 /* dev_list must be empty now */
861 WARN_ON(!list_empty(&dev_opp->dev_list));
863 list_del_rcu(&dev_opp->node);
864 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
869 * _kfree_opp_rcu() - Free OPP RCU handler
872 static void _kfree_opp_rcu(struct rcu_head *head)
874 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
876 kfree_rcu(opp, rcu_head);
880 * _opp_remove() - Remove an OPP from a table definition
881 * @dev_opp: points back to the device_opp struct this opp belongs to
882 * @opp: pointer to the OPP to remove
883 * @notify: OPP_EVENT_REMOVE notification should be sent or not
885 * This function removes an opp definition from the opp list.
887 * Locking: The internal device_opp and opp structures are RCU protected.
888 * It is assumed that the caller holds required mutex for an RCU updater
891 static void _opp_remove(struct device_opp *dev_opp,
892 struct dev_pm_opp *opp, bool notify)
895 * Notify the changes in the availability of the operable
896 * frequency/voltage list.
899 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
900 opp_debug_remove_one(opp);
901 list_del_rcu(&opp->node);
902 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
904 _remove_device_opp(dev_opp);
908 * dev_pm_opp_remove() - Remove an OPP from OPP list
909 * @dev: device for which we do this operation
910 * @freq: OPP to remove with matching 'freq'
912 * This function removes an opp from the opp list.
914 * Locking: The internal device_opp and opp structures are RCU protected.
915 * Hence this function internally uses RCU updater strategy with mutex locks
916 * to keep the integrity of the internal data structures. Callers should ensure
917 * that this function is *NOT* called under RCU protection or in contexts where
918 * mutex cannot be locked.
920 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
922 struct dev_pm_opp *opp;
923 struct device_opp *dev_opp;
926 /* Hold our list modification lock here */
927 mutex_lock(&dev_opp_list_lock);
929 dev_opp = _find_device_opp(dev);
933 list_for_each_entry(opp, &dev_opp->opp_list, node) {
934 if (opp->rate == freq) {
941 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
946 _opp_remove(dev_opp, opp, true);
948 mutex_unlock(&dev_opp_list_lock);
950 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
952 static struct dev_pm_opp *_allocate_opp(struct device *dev,
953 struct device_opp **dev_opp)
955 struct dev_pm_opp *opp;
957 /* allocate new OPP node */
958 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
962 INIT_LIST_HEAD(&opp->node);
964 *dev_opp = _add_device_opp(dev);
973 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
974 struct device_opp *dev_opp)
976 struct regulator *reg = dev_opp->regulator;
978 if (!IS_ERR_OR_NULL(reg) &&
979 !regulator_is_supported_voltage(reg, opp->u_volt_min,
981 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
982 __func__, opp->u_volt_min, opp->u_volt_max);
989 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
990 struct device_opp *dev_opp)
992 struct dev_pm_opp *opp;
993 struct list_head *head = &dev_opp->opp_list;
997 * Insert new OPP in order of increasing frequency and discard if
1000 * Need to use &dev_opp->opp_list in the condition part of the 'for'
1001 * loop, don't replace it with head otherwise it will become an infinite
1004 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
1005 if (new_opp->rate > opp->rate) {
1010 if (new_opp->rate < opp->rate)
1013 /* Duplicate OPPs */
1014 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1015 __func__, opp->rate, opp->u_volt, opp->available,
1016 new_opp->rate, new_opp->u_volt, new_opp->available);
1018 return opp->available && new_opp->u_volt == opp->u_volt ?
1022 new_opp->dev_opp = dev_opp;
1023 list_add_rcu(&new_opp->node, head);
1025 ret = opp_debug_create_one(new_opp, dev_opp);
1027 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
1030 if (!_opp_supported_by_regulators(new_opp, dev_opp)) {
1031 new_opp->available = false;
1032 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1033 __func__, new_opp->rate);
1040 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1041 * @dev: device for which we do this operation
1042 * @freq: Frequency in Hz for this OPP
1043 * @u_volt: Voltage in uVolts for this OPP
1044 * @dynamic: Dynamically added OPPs.
1046 * This function adds an opp definition to the opp list and returns status.
1047 * The opp is made available by default and it can be controlled using
1048 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1050 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1051 * and freed by dev_pm_opp_of_remove_table.
1053 * Locking: The internal device_opp and opp structures are RCU protected.
1054 * Hence this function internally uses RCU updater strategy with mutex locks
1055 * to keep the integrity of the internal data structures. Callers should ensure
1056 * that this function is *NOT* called under RCU protection or in contexts where
1057 * mutex cannot be locked.
1061 * Duplicate OPPs (both freq and volt are same) and opp->available
1062 * -EEXIST Freq are same and volt are different OR
1063 * Duplicate OPPs (both freq and volt are same) and !opp->available
1064 * -ENOMEM Memory allocation failure
1066 static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1069 struct device_opp *dev_opp;
1070 struct dev_pm_opp *new_opp;
1074 /* Hold our list modification lock here */
1075 mutex_lock(&dev_opp_list_lock);
1077 new_opp = _allocate_opp(dev, &dev_opp);
1083 /* populate the opp table */
1084 new_opp->rate = freq;
1085 tol = u_volt * dev_opp->voltage_tolerance_v1 / 100;
1086 new_opp->u_volt = u_volt;
1087 new_opp->u_volt_min = u_volt - tol;
1088 new_opp->u_volt_max = u_volt + tol;
1089 new_opp->available = true;
1090 new_opp->dynamic = dynamic;
1092 ret = _opp_add(dev, new_opp, dev_opp);
1096 mutex_unlock(&dev_opp_list_lock);
1099 * Notify the changes in the availability of the operable
1100 * frequency/voltage list.
1102 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
1106 _opp_remove(dev_opp, new_opp, false);
1108 mutex_unlock(&dev_opp_list_lock);
1112 /* TODO: Support multiple regulators */
1113 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
1114 struct device_opp *dev_opp)
1116 u32 microvolt[3] = {0};
1119 struct property *prop = NULL;
1120 char name[NAME_MAX];
1122 /* Search for "opp-microvolt-<name>" */
1123 if (dev_opp->prop_name) {
1124 snprintf(name, sizeof(name), "opp-microvolt-%s",
1125 dev_opp->prop_name);
1126 prop = of_find_property(opp->np, name, NULL);
1130 /* Search for "opp-microvolt" */
1131 sprintf(name, "opp-microvolt");
1132 prop = of_find_property(opp->np, name, NULL);
1134 /* Missing property isn't a problem, but an invalid entry is */
1139 count = of_property_count_u32_elems(opp->np, name);
1141 dev_err(dev, "%s: Invalid %s property (%d)\n",
1142 __func__, name, count);
1146 /* There can be one or three elements here */
1147 if (count != 1 && count != 3) {
1148 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
1149 __func__, name, count);
1153 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
1155 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
1159 opp->u_volt = microvolt[0];
1162 opp->u_volt_min = opp->u_volt;
1163 opp->u_volt_max = opp->u_volt;
1165 opp->u_volt_min = microvolt[1];
1166 opp->u_volt_max = microvolt[2];
1169 /* Search for "opp-microamp-<name>" */
1171 if (dev_opp->prop_name) {
1172 snprintf(name, sizeof(name), "opp-microamp-%s",
1173 dev_opp->prop_name);
1174 prop = of_find_property(opp->np, name, NULL);
1178 /* Search for "opp-microamp" */
1179 sprintf(name, "opp-microamp");
1180 prop = of_find_property(opp->np, name, NULL);
1183 if (prop && !of_property_read_u32(opp->np, name, &val))
1190 * dev_pm_opp_set_supported_hw() - Set supported platforms
1191 * @dev: Device for which supported-hw has to be set.
1192 * @versions: Array of hierarchy of versions to match.
1193 * @count: Number of elements in the array.
1195 * This is required only for the V2 bindings, and it enables a platform to
1196 * specify the hierarchy of versions it supports. OPP layer will then enable
1197 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1200 * Locking: The internal device_opp and opp structures are RCU protected.
1201 * Hence this function internally uses RCU updater strategy with mutex locks
1202 * to keep the integrity of the internal data structures. Callers should ensure
1203 * that this function is *NOT* called under RCU protection or in contexts where
1204 * mutex cannot be locked.
1206 int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1209 struct device_opp *dev_opp;
1212 /* Hold our list modification lock here */
1213 mutex_lock(&dev_opp_list_lock);
1215 dev_opp = _add_device_opp(dev);
1221 /* Make sure there are no concurrent readers while updating dev_opp */
1222 WARN_ON(!list_empty(&dev_opp->opp_list));
1224 /* Do we already have a version hierarchy associated with dev_opp? */
1225 if (dev_opp->supported_hw) {
1226 dev_err(dev, "%s: Already have supported hardware list\n",
1232 dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
1234 if (!dev_opp->supported_hw) {
1239 dev_opp->supported_hw_count = count;
1240 mutex_unlock(&dev_opp_list_lock);
1244 _remove_device_opp(dev_opp);
1246 mutex_unlock(&dev_opp_list_lock);
1250 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1253 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1254 * @dev: Device for which supported-hw has to be set.
1256 * This is required only for the V2 bindings, and is called for a matching
1257 * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
1258 * will not be freed.
1260 * Locking: The internal device_opp and opp structures are RCU protected.
1261 * Hence this function internally uses RCU updater strategy with mutex locks
1262 * to keep the integrity of the internal data structures. Callers should ensure
1263 * that this function is *NOT* called under RCU protection or in contexts where
1264 * mutex cannot be locked.
1266 void dev_pm_opp_put_supported_hw(struct device *dev)
1268 struct device_opp *dev_opp;
1270 /* Hold our list modification lock here */
1271 mutex_lock(&dev_opp_list_lock);
1273 /* Check for existing list for 'dev' first */
1274 dev_opp = _find_device_opp(dev);
1275 if (IS_ERR(dev_opp)) {
1276 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1280 /* Make sure there are no concurrent readers while updating dev_opp */
1281 WARN_ON(!list_empty(&dev_opp->opp_list));
1283 if (!dev_opp->supported_hw) {
1284 dev_err(dev, "%s: Doesn't have supported hardware list\n",
1289 kfree(dev_opp->supported_hw);
1290 dev_opp->supported_hw = NULL;
1291 dev_opp->supported_hw_count = 0;
1293 /* Try freeing device_opp if this was the last blocking resource */
1294 _remove_device_opp(dev_opp);
1297 mutex_unlock(&dev_opp_list_lock);
1299 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1302 * dev_pm_opp_set_prop_name() - Set prop-extn name
1303 * @dev: Device for which the regulator has to be set.
1304 * @name: name to postfix to properties.
1306 * This is required only for the V2 bindings, and it enables a platform to
1307 * specify the extn to be used for certain property names. The properties to
1308 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1309 * should postfix the property name with -<name> while looking for them.
1311 * Locking: The internal device_opp and opp structures are RCU protected.
1312 * Hence this function internally uses RCU updater strategy with mutex locks
1313 * to keep the integrity of the internal data structures. Callers should ensure
1314 * that this function is *NOT* called under RCU protection or in contexts where
1315 * mutex cannot be locked.
1317 int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1319 struct device_opp *dev_opp;
1322 /* Hold our list modification lock here */
1323 mutex_lock(&dev_opp_list_lock);
1325 dev_opp = _add_device_opp(dev);
1331 /* Make sure there are no concurrent readers while updating dev_opp */
1332 WARN_ON(!list_empty(&dev_opp->opp_list));
1334 /* Do we already have a prop-name associated with dev_opp? */
1335 if (dev_opp->prop_name) {
1336 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1337 dev_opp->prop_name);
1342 dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
1343 if (!dev_opp->prop_name) {
1348 mutex_unlock(&dev_opp_list_lock);
1352 _remove_device_opp(dev_opp);
1354 mutex_unlock(&dev_opp_list_lock);
1358 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1361 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1362 * @dev: Device for which the regulator has to be set.
1364 * This is required only for the V2 bindings, and is called for a matching
1365 * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
1366 * will not be freed.
1368 * Locking: The internal device_opp and opp structures are RCU protected.
1369 * Hence this function internally uses RCU updater strategy with mutex locks
1370 * to keep the integrity of the internal data structures. Callers should ensure
1371 * that this function is *NOT* called under RCU protection or in contexts where
1372 * mutex cannot be locked.
1374 void dev_pm_opp_put_prop_name(struct device *dev)
1376 struct device_opp *dev_opp;
1378 /* Hold our list modification lock here */
1379 mutex_lock(&dev_opp_list_lock);
1381 /* Check for existing list for 'dev' first */
1382 dev_opp = _find_device_opp(dev);
1383 if (IS_ERR(dev_opp)) {
1384 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1388 /* Make sure there are no concurrent readers while updating dev_opp */
1389 WARN_ON(!list_empty(&dev_opp->opp_list));
1391 if (!dev_opp->prop_name) {
1392 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1396 kfree(dev_opp->prop_name);
1397 dev_opp->prop_name = NULL;
1399 /* Try freeing device_opp if this was the last blocking resource */
1400 _remove_device_opp(dev_opp);
1403 mutex_unlock(&dev_opp_list_lock);
1405 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1408 * dev_pm_opp_set_regulator() - Set regulator name for the device
1409 * @dev: Device for which regulator name is being set.
1410 * @name: Name of the regulator.
1412 * In order to support OPP switching, OPP layer needs to know the name of the
1413 * device's regulator, as the core would be required to switch voltages as well.
1415 * This must be called before any OPPs are initialized for the device.
1417 * Locking: The internal device_opp and opp structures are RCU protected.
1418 * Hence this function internally uses RCU updater strategy with mutex locks
1419 * to keep the integrity of the internal data structures. Callers should ensure
1420 * that this function is *NOT* called under RCU protection or in contexts where
1421 * mutex cannot be locked.
1423 int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1425 struct device_opp *dev_opp;
1426 struct regulator *reg;
1429 mutex_lock(&dev_opp_list_lock);
1431 dev_opp = _add_device_opp(dev);
1437 /* This should be called before OPPs are initialized */
1438 if (WARN_ON(!list_empty(&dev_opp->opp_list))) {
1443 /* Already have a regulator set */
1444 if (WARN_ON(!IS_ERR_OR_NULL(dev_opp->regulator))) {
1448 /* Allocate the regulator */
1449 reg = regulator_get_optional(dev, name);
1452 if (ret != -EPROBE_DEFER)
1453 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1454 __func__, name, ret);
1458 dev_opp->regulator = reg;
1460 mutex_unlock(&dev_opp_list_lock);
1464 _remove_device_opp(dev_opp);
1466 mutex_unlock(&dev_opp_list_lock);
1470 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
1473 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1474 * @dev: Device for which regulator was set.
1476 * Locking: The internal device_opp and opp structures are RCU protected.
1477 * Hence this function internally uses RCU updater strategy with mutex locks
1478 * to keep the integrity of the internal data structures. Callers should ensure
1479 * that this function is *NOT* called under RCU protection or in contexts where
1480 * mutex cannot be locked.
1482 void dev_pm_opp_put_regulator(struct device *dev)
1484 struct device_opp *dev_opp;
1486 mutex_lock(&dev_opp_list_lock);
1488 /* Check for existing list for 'dev' first */
1489 dev_opp = _find_device_opp(dev);
1490 if (IS_ERR(dev_opp)) {
1491 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1495 if (IS_ERR_OR_NULL(dev_opp->regulator)) {
1496 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1500 /* Make sure there are no concurrent readers while updating dev_opp */
1501 WARN_ON(!list_empty(&dev_opp->opp_list));
1503 regulator_put(dev_opp->regulator);
1504 dev_opp->regulator = ERR_PTR(-EINVAL);
1506 /* Try freeing device_opp if this was the last blocking resource */
1507 _remove_device_opp(dev_opp);
1510 mutex_unlock(&dev_opp_list_lock);
1512 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1514 static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
1515 struct device_node *np)
1517 unsigned int count = dev_opp->supported_hw_count;
1521 if (!dev_opp->supported_hw)
1525 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1528 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1529 __func__, count, ret);
1533 /* Both of these are bitwise masks of the versions */
1534 if (!(version & dev_opp->supported_hw[count]))
1542 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1543 * @dev: device for which we do this operation
1546 * This function adds an opp definition to the opp list and returns status. The
1547 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1548 * removed by dev_pm_opp_remove.
1550 * Locking: The internal device_opp and opp structures are RCU protected.
1551 * Hence this function internally uses RCU updater strategy with mutex locks
1552 * to keep the integrity of the internal data structures. Callers should ensure
1553 * that this function is *NOT* called under RCU protection or in contexts where
1554 * mutex cannot be locked.
1558 * Duplicate OPPs (both freq and volt are same) and opp->available
1559 * -EEXIST Freq are same and volt are different OR
1560 * Duplicate OPPs (both freq and volt are same) and !opp->available
1561 * -ENOMEM Memory allocation failure
1562 * -EINVAL Failed parsing the OPP node
1564 static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1566 struct device_opp *dev_opp;
1567 struct dev_pm_opp *new_opp;
1572 /* Hold our list modification lock here */
1573 mutex_lock(&dev_opp_list_lock);
1575 new_opp = _allocate_opp(dev, &dev_opp);
1581 ret = of_property_read_u64(np, "opp-hz", &rate);
1583 dev_err(dev, "%s: opp-hz not found\n", __func__);
1587 /* Check if the OPP supports hardware's hierarchy of versions or not */
1588 if (!_opp_is_supported(dev, dev_opp, np)) {
1589 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1594 * Rate is defined as an unsigned long in clk API, and so casting
1595 * explicitly to its type. Must be fixed once rate is 64 bit
1596 * guaranteed in clk API.
1598 new_opp->rate = (unsigned long)rate;
1599 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
1602 new_opp->dynamic = false;
1603 new_opp->available = true;
1605 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1606 new_opp->clock_latency_ns = val;
1608 ret = opp_parse_supplies(new_opp, dev, dev_opp);
1612 ret = _opp_add(dev, new_opp, dev_opp);
1616 /* OPP to select on device suspend */
1617 if (of_property_read_bool(np, "opp-suspend")) {
1618 if (dev_opp->suspend_opp) {
1619 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1620 __func__, dev_opp->suspend_opp->rate,
1623 new_opp->suspend = true;
1624 dev_opp->suspend_opp = new_opp;
1628 if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
1629 dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
1631 mutex_unlock(&dev_opp_list_lock);
1633 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
1634 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
1635 new_opp->u_volt_min, new_opp->u_volt_max,
1636 new_opp->clock_latency_ns);
1639 * Notify the changes in the availability of the operable
1640 * frequency/voltage list.
1642 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
1646 _opp_remove(dev_opp, new_opp, false);
1648 mutex_unlock(&dev_opp_list_lock);
1653 * dev_pm_opp_add() - Add an OPP table from a table definitions
1654 * @dev: device for which we do this operation
1655 * @freq: Frequency in Hz for this OPP
1656 * @u_volt: Voltage in uVolts for this OPP
1658 * This function adds an opp definition to the opp list and returns status.
1659 * The opp is made available by default and it can be controlled using
1660 * dev_pm_opp_enable/disable functions.
1662 * Locking: The internal device_opp and opp structures are RCU protected.
1663 * Hence this function internally uses RCU updater strategy with mutex locks
1664 * to keep the integrity of the internal data structures. Callers should ensure
1665 * that this function is *NOT* called under RCU protection or in contexts where
1666 * mutex cannot be locked.
1670 * Duplicate OPPs (both freq and volt are same) and opp->available
1671 * -EEXIST Freq are same and volt are different OR
1672 * Duplicate OPPs (both freq and volt are same) and !opp->available
1673 * -ENOMEM Memory allocation failure
1675 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1677 return _opp_add_v1(dev, freq, u_volt, true);
1679 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1682 * _opp_set_availability() - helper to set the availability of an opp
1683 * @dev: device for which we do this operation
1684 * @freq: OPP frequency to modify availability
1685 * @availability_req: availability status requested for this opp
1687 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1688 * share a common logic which is isolated here.
1690 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1691 * copy operation, returns 0 if no modification was done OR modification was
1694 * Locking: The internal device_opp and opp structures are RCU protected.
1695 * Hence this function internally uses RCU updater strategy with mutex locks to
1696 * keep the integrity of the internal data structures. Callers should ensure
1697 * that this function is *NOT* called under RCU protection or in contexts where
1698 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1700 static int _opp_set_availability(struct device *dev, unsigned long freq,
1701 bool availability_req)
1703 struct device_opp *dev_opp;
1704 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1707 /* keep the node allocated */
1708 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1712 mutex_lock(&dev_opp_list_lock);
1714 /* Find the device_opp */
1715 dev_opp = _find_device_opp(dev);
1716 if (IS_ERR(dev_opp)) {
1717 r = PTR_ERR(dev_opp);
1718 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1722 /* Do we have the frequency? */
1723 list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
1724 if (tmp_opp->rate == freq) {
1734 /* Is update really needed? */
1735 if (opp->available == availability_req)
1737 /* copy the old data over */
1740 /* plug in new node */
1741 new_opp->available = availability_req;
1743 list_replace_rcu(&opp->node, &new_opp->node);
1744 mutex_unlock(&dev_opp_list_lock);
1745 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1747 /* Notify the change of the OPP availability */
1748 if (availability_req)
1749 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
1752 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
1758 mutex_unlock(&dev_opp_list_lock);
1764 * dev_pm_opp_enable() - Enable a specific OPP
1765 * @dev: device for which we do this operation
1766 * @freq: OPP frequency to enable
1768 * Enables a provided opp. If the operation is valid, this returns 0, else the
1769 * corresponding error value. It is meant to be used for users an OPP available
1770 * after being temporarily made unavailable with dev_pm_opp_disable.
1772 * Locking: The internal device_opp and opp structures are RCU protected.
1773 * Hence this function indirectly uses RCU and mutex locks to keep the
1774 * integrity of the internal data structures. Callers should ensure that
1775 * this function is *NOT* called under RCU protection or in contexts where
1776 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1778 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1779 * copy operation, returns 0 if no modification was done OR modification was
1782 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1784 return _opp_set_availability(dev, freq, true);
1786 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1789 * dev_pm_opp_disable() - Disable a specific OPP
1790 * @dev: device for which we do this operation
1791 * @freq: OPP frequency to disable
1793 * Disables a provided opp. If the operation is valid, this returns
1794 * 0, else the corresponding error value. It is meant to be a temporary
1795 * control by users to make this OPP not available until the circumstances are
1796 * right to make it available again (with a call to dev_pm_opp_enable).
1798 * Locking: The internal device_opp and opp structures are RCU protected.
1799 * Hence this function indirectly uses RCU and mutex locks to keep the
1800 * integrity of the internal data structures. Callers should ensure that
1801 * this function is *NOT* called under RCU protection or in contexts where
1802 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1804 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1805 * copy operation, returns 0 if no modification was done OR modification was
1808 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1810 return _opp_set_availability(dev, freq, false);
1812 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1815 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1816 * @dev: device pointer used to lookup device OPPs.
1818 * Return: pointer to notifier head if found, otherwise -ENODEV or
1819 * -EINVAL based on type of error casted as pointer. value must be checked
1820 * with IS_ERR to determine valid pointer or error result.
1822 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
1823 * protected pointer. The reason for the same is that the opp pointer which is
1824 * returned will remain valid for use with opp_get_{voltage, freq} only while
1825 * under the locked area. The pointer returned must be used prior to unlocking
1826 * with rcu_read_unlock() to maintain the integrity of the pointer.
1828 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1830 struct device_opp *dev_opp = _find_device_opp(dev);
1832 if (IS_ERR(dev_opp))
1833 return ERR_CAST(dev_opp); /* matching type */
1835 return &dev_opp->srcu_head;
1837 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1841 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1843 * @dev: device pointer used to lookup device OPPs.
1845 * Free OPPs created using static entries present in DT.
1847 * Locking: The internal device_opp and opp structures are RCU protected.
1848 * Hence this function indirectly uses RCU updater strategy with mutex locks
1849 * to keep the integrity of the internal data structures. Callers should ensure
1850 * that this function is *NOT* called under RCU protection or in contexts where
1851 * mutex cannot be locked.
1853 void dev_pm_opp_of_remove_table(struct device *dev)
1855 struct device_opp *dev_opp;
1856 struct dev_pm_opp *opp, *tmp;
1858 /* Hold our list modification lock here */
1859 mutex_lock(&dev_opp_list_lock);
1861 /* Check for existing list for 'dev' */
1862 dev_opp = _find_device_opp(dev);
1863 if (IS_ERR(dev_opp)) {
1864 int error = PTR_ERR(dev_opp);
1866 if (error != -ENODEV)
1867 WARN(1, "%s: dev_opp: %d\n",
1868 IS_ERR_OR_NULL(dev) ?
1869 "Invalid device" : dev_name(dev),
1874 /* Find if dev_opp manages a single device */
1875 if (list_is_singular(&dev_opp->dev_list)) {
1876 /* Free static OPPs */
1877 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1879 _opp_remove(dev_opp, opp, true);
1882 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
1886 mutex_unlock(&dev_opp_list_lock);
1888 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
1890 /* Returns opp descriptor node for a device, caller must do of_node_put() */
1891 struct device_node *_of_get_opp_desc_node(struct device *dev)
1894 * TODO: Support for multiple OPP tables.
1896 * There should be only ONE phandle present in "operating-points-v2"
1900 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1903 /* Initializes OPP tables based on new bindings */
1904 static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1906 struct device_node *np;
1907 struct device_opp *dev_opp;
1908 int ret = 0, count = 0;
1910 mutex_lock(&dev_opp_list_lock);
1912 dev_opp = _managed_opp(opp_np);
1914 /* OPPs are already managed */
1915 if (!_add_list_dev(dev, dev_opp))
1917 mutex_unlock(&dev_opp_list_lock);
1920 mutex_unlock(&dev_opp_list_lock);
1922 /* We have opp-list node now, iterate over it and add OPPs */
1923 for_each_available_child_of_node(opp_np, np) {
1926 ret = _opp_add_static_v2(dev, np);
1928 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1934 /* There should be one of more OPP defined */
1935 if (WARN_ON(!count))
1938 mutex_lock(&dev_opp_list_lock);
1940 dev_opp = _find_device_opp(dev);
1941 if (WARN_ON(IS_ERR(dev_opp))) {
1942 ret = PTR_ERR(dev_opp);
1943 mutex_unlock(&dev_opp_list_lock);
1947 dev_opp->np = opp_np;
1948 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1950 mutex_unlock(&dev_opp_list_lock);
1955 dev_pm_opp_of_remove_table(dev);
1960 /* Initializes OPP tables based on old-deprecated bindings */
1961 static int _of_add_opp_table_v1(struct device *dev)
1963 const struct property *prop;
1967 prop = of_find_property(dev->of_node, "operating-points", NULL);
1974 * Each OPP is a set of tuples consisting of frequency and
1975 * voltage like <freq-kHz vol-uV>.
1977 nr = prop->length / sizeof(u32);
1979 dev_err(dev, "%s: Invalid OPP list\n", __func__);
1985 unsigned long freq = be32_to_cpup(val++) * 1000;
1986 unsigned long volt = be32_to_cpup(val++);
1988 if (_opp_add_v1(dev, freq, volt, false))
1989 dev_warn(dev, "%s: Failed to add OPP %ld\n",
/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	int ret;

	/*
	 * OPPs have two version of bindings now. The older one is deprecated,
	 * try for the new binding first.
	 */
	opp_np = _of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);