2 * Generic OPP Interface
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/regulator/consumer.h>
28 * The root of the list of all devices. All device_opp structures branch off
29 * from here, with each device_opp containing the list of opp it supports in
30 * various states of availability.
32 static LIST_HEAD(dev_opp_list);
33 /* Lock to allow exclusive modification to the device and opp lists */
34 DEFINE_MUTEX(dev_opp_list_lock);
/*
 * Lockdep assertion: the OPP lists may only be traversed under the RCU read
 * lock (readers) or with dev_opp_list_lock held (writers); warn otherwise.
 */
36 #define opp_rcu_lockdep_assert() \
38 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
39 !lockdep_is_held(&dev_opp_list_lock), \
40 "Missing rcu_read_lock() or " \
41 "dev_opp_list_lock protection"); \
/*
 * _find_list_dev() - find the device_list_opp entry for @dev
 *
 * Walks @dev_opp's dev_list and returns the entry whose ->dev matches @dev.
 * NOTE(review): the return statements are not visible in this excerpt
 * (source lines appear to be missing) — presumably returns NULL on no match.
 */
44 static struct device_list_opp *_find_list_dev(const struct device *dev,
45 struct device_opp *dev_opp)
47 struct device_list_opp *list_dev;
49 list_for_each_entry(list_dev, &dev_opp->dev_list, node)
50 if (list_dev->dev == dev)
/*
 * _managed_opp() - look up an existing OPP table by its DT node pointer.
 *
 * Scans the global dev_opp_list for a device_opp whose ->np matches @np.
 */
56 static struct device_opp *_managed_opp(const struct device_node *np)
58 struct device_opp *dev_opp;
60 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
61 if (dev_opp->np == np) {
63 * Multiple devices can point to the same OPP table and
64 * so will have same node-pointer, np.
66 * But the OPPs will be considered as shared only if the
67 * OPP table contains an "opp-shared" property.
69 return dev_opp->shared_opp ? dev_opp : NULL;
77 * _find_device_opp() - find device_opp struct using device pointer
78 * @dev: device pointer used to lookup device OPPs
80 * Search list of device OPPs for one containing matching device. Does a RCU
81 * reader operation to grab the pointer needed.
83 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
84 * -EINVAL based on type of error.
86 * Locking: For readers, this function must be called under rcu_read_lock().
87 * device_opp is a RCU protected pointer, which means that device_opp is valid
88 * as long as we are under RCU lock.
90 * For Writers, this function must be called with dev_opp_list_lock held.
92 struct device_opp *_find_device_opp(struct device *dev)
94 struct device_opp *dev_opp;
96 opp_rcu_lockdep_assert();
98 if (IS_ERR_OR_NULL(dev)) {
99 pr_err("%s: Invalid parameters\n", __func__);
100 return ERR_PTR(-EINVAL);
/* Match by device: each dev_opp tracks its user devices in its dev_list. */
103 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
104 if (_find_list_dev(dev, dev_opp))
107 return ERR_PTR(-ENODEV);
111 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
112 * @opp: opp for which voltage has to be returned for
114 * Return: voltage in micro volt corresponding to the opp, else
117 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
118 * protected pointer. This means that opp which could have been fetched by
119 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
120 * under RCU lock. The pointer returned by the opp_find_freq family must be
121 * used in the same section as the usage of this function with the pointer
122 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
125 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
127 struct dev_pm_opp *tmp_opp;
130 opp_rcu_lockdep_assert();
/* Sample the RCU-protected pointer once before validating/using it. */
132 tmp_opp = rcu_dereference(opp);
133 if (IS_ERR_OR_NULL(tmp_opp))
134 pr_err("%s: Invalid parameters\n", __func__);
140 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
143 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
144 * @opp: opp for which frequency has to be returned for
146 * Return: frequency in hertz corresponding to the opp, else
149 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
150 * protected pointer. This means that opp which could have been fetched by
151 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
152 * under RCU lock. The pointer returned by the opp_find_freq family must be
153 * used in the same section as the usage of this function with the pointer
154 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
157 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
159 struct dev_pm_opp *tmp_opp;
162 opp_rcu_lockdep_assert();
164 tmp_opp = rcu_dereference(opp);
/* Unlike get_voltage(), an unavailable OPP is also rejected here. */
165 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
166 pr_err("%s: Invalid parameters\n", __func__);
172 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
175 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
176 * @opp: opp for which turbo mode is being verified
178 * Turbo OPPs are not for normal use, and can be enabled (under certain
179 * conditions) for short duration of times to finish high throughput work
180 * quickly. Running on them for longer times may overheat the chip.
182 * Return: true if opp is turbo opp, else false.
184 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
185 * protected pointer. This means that opp which could have been fetched by
186 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
187 * under RCU lock. The pointer returned by the opp_find_freq family must be
188 * used in the same section as the usage of this function with the pointer
189 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
192 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
194 struct dev_pm_opp *tmp_opp;
196 opp_rcu_lockdep_assert();
198 tmp_opp = rcu_dereference(opp);
/* Invalid or unavailable OPPs are not turbo (error path returns below). */
199 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
200 pr_err("%s: Invalid parameters\n", __func__);
204 return tmp_opp->turbo;
206 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
209 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
210 * @dev: device for which we do this operation
212 * Return: This function returns the max clock latency in nanoseconds.
214 * Locking: This function takes rcu_read_lock().
216 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
218 struct device_opp *dev_opp;
219 unsigned long clock_latency_ns;
223 dev_opp = _find_device_opp(dev);
/* Fall back to 0 when no OPP table exists for the device. */
225 clock_latency_ns = 0;
227 clock_latency_ns = dev_opp->clock_latency_ns_max;
230 return clock_latency_ns;
232 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
235 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
236 * @dev: device for which we do this operation
238 * Return: This function returns the max voltage latency in nanoseconds.
240 * Locking: This function takes rcu_read_lock().
242 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
244 struct device_opp *dev_opp;
245 struct dev_pm_opp *opp;
246 struct regulator *reg;
247 unsigned long latency_ns = 0;
248 unsigned long min_uV = ~0, max_uV = 0;
253 dev_opp = _find_device_opp(dev);
254 if (IS_ERR(dev_opp)) {
259 reg = dev_opp->regulator;
260 if (IS_ERR_OR_NULL(reg)) {
261 /* Regulator may not be required for device */
263 dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
/* Compute the widest voltage span across all OPPs of this device. */
269 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
273 if (opp->u_volt_min < min_uV)
274 min_uV = opp->u_volt_min;
275 if (opp->u_volt_max > max_uV)
276 max_uV = opp->u_volt_max;
282 * The caller needs to ensure that dev_opp (and hence the regulator)
283 * isn't freed, while we are executing this routine.
285 ret = regulator_set_voltage_time(reg, min_uV, max_uV);
/* The *1000 implies the regulator API reports µs; converted to ns here. */
287 latency_ns = ret * 1000;
291 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
294 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
296 * @dev: device for which we do this operation
298 * Return: This function returns the max transition latency, in nanoseconds, to
299 * switch from one OPP to other.
301 * Locking: This function takes rcu_read_lock().
/* Worst-case switch cost = max voltage latency + max clock latency. */
303 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
305 return dev_pm_opp_get_max_volt_latency(dev) +
306 dev_pm_opp_get_max_clock_latency(dev);
308 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
311 * dev_pm_opp_get_suspend_opp() - Get suspend opp
312 * @dev: device for which we do this operation
314 * Return: This function returns pointer to the suspend opp if it is
315 * defined and available, otherwise it returns NULL.
317 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
318 * protected pointer. The reason for the same is that the opp pointer which is
319 * returned will remain valid for use with opp_get_{voltage, freq} only while
320 * under the locked area. The pointer returned must be used prior to unlocking
321 * with rcu_read_unlock() to maintain the integrity of the pointer.
323 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
325 struct device_opp *dev_opp;
327 opp_rcu_lockdep_assert();
329 dev_opp = _find_device_opp(dev);
/* Only hand out a suspend OPP that exists AND is currently available. */
330 if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
331 !dev_opp->suspend_opp->available)
334 return dev_opp->suspend_opp;
336 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
339 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
340 * @dev: device for which we do this operation
342 * Return: This function returns the number of available opps if there are any,
343 * else returns 0 if none or the corresponding error value.
345 * Locking: This function takes rcu_read_lock().
347 int dev_pm_opp_get_opp_count(struct device *dev)
349 struct device_opp *dev_opp;
350 struct dev_pm_opp *temp_opp;
355 dev_opp = _find_device_opp(dev);
356 if (IS_ERR(dev_opp)) {
357 count = PTR_ERR(dev_opp);
358 dev_err(dev, "%s: device OPP not found (%d)\n",
/* Count only OPPs marked available; disabled ones are skipped. */
363 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
364 if (temp_opp->available)
372 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
375 * dev_pm_opp_find_freq_exact() - search for an exact frequency
376 * @dev: device for which we do this operation
377 * @freq: frequency to search for
378 * @available: true/false - match for available opp
380 * Return: Searches for exact match in the opp list and returns pointer to the
381 * matching opp if found, else returns ERR_PTR in case of error and should
382 * be handled using IS_ERR. Error return values can be:
383 * EINVAL: for bad pointer
384 * ERANGE: no match found for search
385 * ENODEV: if device not found in list of registered devices
387 * Note: available is a modifier for the search. if available=true, then the
388 * match is for exact matching frequency and is available in the stored OPP
389 * table. if false, the match is for exact frequency which is not available.
391 * This provides a mechanism to enable an opp which is not available currently
392 * or the opposite as well.
394 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
395 * protected pointer. The reason for the same is that the opp pointer which is
396 * returned will remain valid for use with opp_get_{voltage, freq} only while
397 * under the locked area. The pointer returned must be used prior to unlocking
398 * with rcu_read_unlock() to maintain the integrity of the pointer.
400 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
404 struct device_opp *dev_opp;
405 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
407 opp_rcu_lockdep_assert();
409 dev_opp = _find_device_opp(dev);
410 if (IS_ERR(dev_opp)) {
411 int r = PTR_ERR(dev_opp);
412 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
/* Match requires BOTH the exact rate and the requested availability state. */
416 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
417 if (temp_opp->available == available &&
418 temp_opp->rate == freq) {
426 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
429 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
430 * @dev: device for which we do this operation
431 * @freq: Start frequency
433 * Search for the matching ceil *available* OPP from a starting freq
436 * Return: matching *opp and refreshes *freq accordingly, else returns
437 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
439 * EINVAL: for bad pointer
440 * ERANGE: no match found for search
441 * ENODEV: if device not found in list of registered devices
443 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
444 * protected pointer. The reason for the same is that the opp pointer which is
445 * returned will remain valid for use with opp_get_{voltage, freq} only while
446 * under the locked area. The pointer returned must be used prior to unlocking
447 * with rcu_read_unlock() to maintain the integrity of the pointer.
449 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
452 struct device_opp *dev_opp;
453 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
455 opp_rcu_lockdep_assert();
458 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
459 return ERR_PTR(-EINVAL);
462 dev_opp = _find_device_opp(dev);
464 return ERR_CAST(dev_opp);
/* List is sorted by rate, so the first available OPP >= *freq is the ceil. */
466 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
467 if (temp_opp->available && temp_opp->rate >= *freq) {
476 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
479 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
480 * @dev: device for which we do this operation
481 * @freq: Start frequency
483 * Search for the matching floor *available* OPP from a starting freq
486 * Return: matching *opp and refreshes *freq accordingly, else returns
487 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
489 * EINVAL: for bad pointer
490 * ERANGE: no match found for search
491 * ENODEV: if device not found in list of registered devices
493 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
494 * protected pointer. The reason for the same is that the opp pointer which is
495 * returned will remain valid for use with opp_get_{voltage, freq} only while
496 * under the locked area. The pointer returned must be used prior to unlocking
497 * with rcu_read_unlock() to maintain the integrity of the pointer.
499 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
502 struct device_opp *dev_opp;
503 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
505 opp_rcu_lockdep_assert();
508 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
509 return ERR_PTR(-EINVAL);
512 dev_opp = _find_device_opp(dev);
514 return ERR_CAST(dev_opp);
/* Walk the sorted list keeping the last available OPP whose rate <= *freq. */
516 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
517 if (temp_opp->available) {
518 /* go to the next node, before choosing prev */
519 if (temp_opp->rate > *freq)
530 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
533 * The caller needs to ensure that device_opp (and hence the clk) isn't freed,
534 * while clk returned here is used.
/* Fetch the clk handle cached in the device's OPP table (set at table init). */
536 static struct clk *_get_opp_clk(struct device *dev)
538 struct device_opp *dev_opp;
543 dev_opp = _find_device_opp(dev);
544 if (IS_ERR(dev_opp)) {
545 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
546 clk = ERR_CAST(dev_opp);
552 dev_err(dev, "%s: No clock available for the device\n",
/*
 * _set_opp_voltage() - program the regulator to the OPP's voltage triplet.
 *
 * A missing regulator is treated as "nothing to do" (dev_dbg), not an error;
 * a failed regulator_set_voltage_triplet() is reported via dev_err.
 */
560 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
561 unsigned long u_volt, unsigned long u_volt_min,
562 unsigned long u_volt_max)
566 /* Regulator not available for device */
568 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
573 dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
576 ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
579 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
580 __func__, u_volt_min, u_volt, u_volt_max, ret);
586 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
587 * @dev: device for which we do this operation
588 * @target_freq: frequency to achieve
590 * This configures the power-supplies and clock source to the levels specified
591 * by the OPP corresponding to the target_freq.
593 * Locking: This function takes rcu_read_lock().
595 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
597 struct device_opp *dev_opp;
598 struct dev_pm_opp *old_opp, *opp;
599 struct regulator *reg;
601 unsigned long freq, old_freq;
602 unsigned long u_volt, u_volt_min, u_volt_max;
603 unsigned long ou_volt, ou_volt_min, ou_volt_max;
606 if (unlikely(!target_freq)) {
607 dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
612 clk = _get_opp_clk(dev);
/* Let the clk framework pick the achievable rate closest to the target. */
616 freq = clk_round_rate(clk, target_freq);
620 old_freq = clk_get_rate(clk);
622 /* Return early if nothing to do */
623 if (old_freq == freq) {
624 dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
631 dev_opp = _find_device_opp(dev);
632 if (IS_ERR(dev_opp)) {
633 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
635 return PTR_ERR(dev_opp);
/* Snapshot the current OPP's voltages so we can roll back on failure. */
638 old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
639 if (!IS_ERR(old_opp)) {
640 ou_volt = old_opp->u_volt;
641 ou_volt_min = old_opp->u_volt_min;
642 ou_volt_max = old_opp->u_volt_max;
644 dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
645 __func__, old_freq, PTR_ERR(old_opp));
648 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
651 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
652 __func__, freq, ret);
657 u_volt = opp->u_volt;
658 u_volt_min = opp->u_volt_min;
659 u_volt_max = opp->u_volt_max;
661 reg = dev_opp->regulator;
665 /* Scaling up? Scale voltage before frequency */
666 if (freq > old_freq) {
667 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
670 goto restore_voltage;
673 /* Change frequency */
675 dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
676 __func__, old_freq, freq);
678 ret = clk_set_rate(clk, freq);
680 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
682 goto restore_voltage;
685 /* Scaling down? Scale voltage after frequency */
686 if (freq < old_freq) {
687 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
/* Error path: undo the frequency change, then the voltage change. */
696 if (clk_set_rate(clk, old_freq))
697 dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
700 /* This shouldn't harm even if the voltages weren't updated earlier */
701 if (!IS_ERR(old_opp))
702 _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
706 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
708 /* List-dev Helpers */
/* RCU callback: free a device_list_opp once all readers are done with it. */
709 static void _kfree_list_dev_rcu(struct rcu_head *head)
711 struct device_list_opp *list_dev;
713 list_dev = container_of(head, struct device_list_opp, rcu_head);
714 kfree_rcu(list_dev, rcu_head);
/*
 * _remove_list_dev() - unlink @list_dev from @dev_opp and free it via SRCU.
 *
 * Debugfs entries are torn down first, then the node is removed and the
 * memory reclaimed after the SRCU grace period.
 */
717 static void _remove_list_dev(struct device_list_opp *list_dev,
718 struct device_opp *dev_opp)
720 opp_debug_unregister(list_dev, dev_opp);
721 list_del(&list_dev->node);
722 call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
723 _kfree_list_dev_rcu);
/*
 * _add_list_dev() - allocate a list-dev entry for @dev and attach it to
 * @dev_opp's dev_list; also registers the per-device debugfs entries.
 * A debugfs registration failure is only reported, per the dev_err below.
 */
726 struct device_list_opp *_add_list_dev(const struct device *dev,
727 struct device_opp *dev_opp)
729 struct device_list_opp *list_dev;
732 list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
736 /* Initialize list-dev */
738 list_add_rcu(&list_dev->node, &dev_opp->dev_list);
740 /* Create debugfs entries for the dev_opp */
741 ret = opp_debug_register(list_dev, dev_opp);
743 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
750 * _add_device_opp() - Find device OPP table or allocate a new one
751 * @dev: device for which we do this operation
753 * It tries to find an existing table first, if it couldn't find one, it
754 * allocates a new OPP table and returns that.
756 * Return: valid device_opp pointer if success, else NULL.
758 static struct device_opp *_add_device_opp(struct device *dev)
760 struct device_opp *dev_opp;
761 struct device_list_opp *list_dev;
762 struct device_node *np;
765 /* Check for existing list for 'dev' first */
766 dev_opp = _find_device_opp(dev);
767 if (!IS_ERR(dev_opp))
771 * Allocate a new device OPP table. In the infrequent case where a new
772 * device is needed to be added, we pay this penalty.
774 dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
778 INIT_LIST_HEAD(&dev_opp->dev_list);
780 list_dev = _add_list_dev(dev, dev_opp);
787 * Only required for backward compatibility with v1 bindings, but isn't
788 * harmful for other cases. And so we do it unconditionally.
790 np = of_node_get(dev->of_node);
/* v1 DT bindings: per-device "clock-latency" and "voltage-tolerance". */
794 if (!of_property_read_u32(np, "clock-latency", &val))
795 dev_opp->clock_latency_ns_max = val;
796 of_property_read_u32(np, "voltage-tolerance",
797 &dev_opp->voltage_tolerance_v1);
801 /* Find clk for the device */
802 dev_opp->clk = clk_get(dev, NULL);
803 if (IS_ERR(dev_opp->clk)) {
804 ret = PTR_ERR(dev_opp->clk);
/* -EPROBE_DEFER is expected during probe ordering; stay quiet for it. */
805 if (ret != -EPROBE_DEFER)
806 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
810 srcu_init_notifier_head(&dev_opp->srcu_head);
811 INIT_LIST_HEAD(&dev_opp->opp_list);
813 /* Secure the device list modification */
814 list_add_rcu(&dev_opp->node, &dev_opp_list);
819 * _kfree_device_rcu() - Free device_opp RCU handler
822 static void _kfree_device_rcu(struct rcu_head *head)
824 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
826 kfree_rcu(device_opp, rcu_head);
830 * _remove_device_opp() - Removes a device OPP table
831 * @dev_opp: device OPP table to be removed.
833 * Removes/frees device OPP table if it doesn't contain any OPPs.
835 static void _remove_device_opp(struct device_opp *dev_opp)
837 struct device_list_opp *list_dev;
/* Bail out while any OPPs remain; the table is still in use. */
839 if (!list_empty(&dev_opp->opp_list))
842 if (dev_opp->supported_hw)
845 if (dev_opp->prop_name)
848 if (!IS_ERR_OR_NULL(dev_opp->regulator))
/* Release the clk reference taken in _add_device_opp(). */
852 if (!IS_ERR(dev_opp->clk))
853 clk_put(dev_opp->clk);
855 list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
858 _remove_list_dev(list_dev, dev_opp);
860 /* dev_list must be empty now */
861 WARN_ON(!list_empty(&dev_opp->dev_list));
863 list_del_rcu(&dev_opp->node);
864 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
869 * _kfree_opp_rcu() - Free OPP RCU handler
872 static void _kfree_opp_rcu(struct rcu_head *head)
874 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
876 kfree_rcu(opp, rcu_head);
880 * _opp_remove() - Remove an OPP from a table definition
881 * @dev_opp: points back to the device_opp struct this opp belongs to
882 * @opp: pointer to the OPP to remove
883 * @notify: OPP_EVENT_REMOVE notification should be sent or not
885 * This function removes an opp definition from the opp list.
887 * Locking: The internal device_opp and opp structures are RCU protected.
888 * It is assumed that the caller holds required mutex for an RCU updater
891 static void _opp_remove(struct device_opp *dev_opp,
892 struct dev_pm_opp *opp, bool notify)
895 * Notify the changes in the availability of the operable
896 * frequency/voltage list.
899 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
900 opp_debug_remove_one(opp);
901 list_del_rcu(&opp->node);
902 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
/* Drop the table itself too if this was its last OPP. */
904 _remove_device_opp(dev_opp);
908 * dev_pm_opp_remove() - Remove an OPP from OPP list
909 * @dev: device for which we do this operation
910 * @freq: OPP to remove with matching 'freq'
912 * This function removes an opp from the opp list.
914 * Locking: The internal device_opp and opp structures are RCU protected.
915 * Hence this function internally uses RCU updater strategy with mutex locks
916 * to keep the integrity of the internal data structures. Callers should ensure
917 * that this function is *NOT* called under RCU protection or in contexts where
918 * mutex cannot be locked.
920 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
922 struct dev_pm_opp *opp;
923 struct device_opp *dev_opp;
926 /* Hold our list modification lock here */
927 mutex_lock(&dev_opp_list_lock);
929 dev_opp = _find_device_opp(dev);
/* Linear scan for the OPP with the exact matching rate. */
933 list_for_each_entry(opp, &dev_opp->opp_list, node) {
934 if (opp->rate == freq) {
941 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
946 _opp_remove(dev_opp, opp, true);
948 mutex_unlock(&dev_opp_list_lock);
950 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
/*
 * _allocate_opp() - allocate a zeroed OPP node and find/create the device's
 * OPP table, returned through @dev_opp. Caller must hold dev_opp_list_lock.
 */
952 static struct dev_pm_opp *_allocate_opp(struct device *dev,
953 struct device_opp **dev_opp)
955 struct dev_pm_opp *opp;
957 /* allocate new OPP node */
958 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
962 INIT_LIST_HEAD(&opp->node);
964 *dev_opp = _add_device_opp(dev);
/*
 * _opp_supported_by_regulators() - check the OPP's voltage range against the
 * table's regulator. With no regulator configured the check passes trivially.
 */
973 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
974 struct device_opp *dev_opp)
976 struct regulator *reg = dev_opp->regulator;
978 if (!IS_ERR_OR_NULL(reg) &&
979 !regulator_is_supported_voltage(reg, opp->u_volt_min,
981 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
982 __func__, opp->u_volt_min, opp->u_volt_max);
/*
 * _opp_add() - insert @new_opp into @dev_opp's rate-sorted opp_list.
 *
 * Duplicates (same rate) are rejected; identical duplicates vs. conflicting
 * voltage duplicates return different codes (see the ternary below).
 */
989 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
990 struct device_opp *dev_opp)
992 struct dev_pm_opp *opp;
993 struct list_head *head = &dev_opp->opp_list;
997 * Insert new OPP in order of increasing frequency and discard if
1000 * Need to use &dev_opp->opp_list in the condition part of the 'for'
1001 * loop, don't replace it with head otherwise it will become an infinite
1004 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
1005 if (new_opp->rate > opp->rate) {
1010 if (new_opp->rate < opp->rate)
1013 /* Duplicate OPPs */
1014 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1015 __func__, opp->rate, opp->u_volt, opp->available,
1016 new_opp->rate, new_opp->u_volt, new_opp->available);
1018 return opp->available && new_opp->u_volt == opp->u_volt ?
1022 new_opp->dev_opp = dev_opp;
1023 list_add_rcu(&new_opp->node, head);
1025 ret = opp_debug_create_one(new_opp, dev_opp);
1027 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
/* Keep the OPP in the table but disabled if the regulator can't reach it. */
1030 if (!_opp_supported_by_regulators(new_opp, dev_opp)) {
1031 new_opp->available = false;
1032 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1033 __func__, new_opp->rate);
1040 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1041 * @dev: device for which we do this operation
1042 * @freq: Frequency in Hz for this OPP
1043 * @u_volt: Voltage in uVolts for this OPP
1044 * @dynamic: Dynamically added OPPs.
1046 * This function adds an opp definition to the opp list and returns status.
1047 * The opp is made available by default and it can be controlled using
1048 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1050 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1051 * and freed by dev_pm_opp_of_remove_table.
1053 * Locking: The internal device_opp and opp structures are RCU protected.
1054 * Hence this function internally uses RCU updater strategy with mutex locks
1055 * to keep the integrity of the internal data structures. Callers should ensure
1056 * that this function is *NOT* called under RCU protection or in contexts where
1057 * mutex cannot be locked.
1061 * Duplicate OPPs (both freq and volt are same) and opp->available
1062 * -EEXIST Freq are same and volt are different OR
1063 * Duplicate OPPs (both freq and volt are same) and !opp->available
1064 * -ENOMEM Memory allocation failure
1066 static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1069 struct device_opp *dev_opp;
1070 struct dev_pm_opp *new_opp;
1074 /* Hold our list modification lock here */
1075 mutex_lock(&dev_opp_list_lock);
1077 new_opp = _allocate_opp(dev, &dev_opp);
1083 /* populate the opp table */
1084 new_opp->rate = freq;
/* v1 bindings give a single voltage; derive min/max from the DT tolerance. */
1085 tol = u_volt * dev_opp->voltage_tolerance_v1 / 100;
1086 new_opp->u_volt = u_volt;
1087 new_opp->u_volt_min = u_volt - tol;
1088 new_opp->u_volt_max = u_volt + tol;
1089 new_opp->available = true;
1090 new_opp->dynamic = dynamic;
1092 ret = _opp_add(dev, new_opp, dev_opp);
1096 mutex_unlock(&dev_opp_list_lock);
1099 * Notify the changes in the availability of the operable
1100 * frequency/voltage list.
1102 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
/* Error path: free the partially-constructed OPP without notifying. */
1106 _opp_remove(dev_opp, new_opp, false);
1108 mutex_unlock(&dev_opp_list_lock);
1112 /* TODO: Support multiple regulators */
/*
 * opp_parse_supplies() - read the OPP's supply properties from DT.
 *
 * Looks up "opp-microvolt[-<prop_name>]" (1 or 3 u32 cells) and
 * "opp-microamp[-<prop_name>]", preferring the name-suffixed variants
 * when dev_opp->prop_name is set (see dev_pm_opp_set_prop_name()).
 */
1113 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
1114 struct device_opp *dev_opp)
1116 u32 microvolt[3] = {0};
1119 struct property *prop = NULL;
1120 char name[NAME_MAX];
1122 /* Search for "opp-microvolt-<name>" */
1123 if (dev_opp->prop_name) {
1124 snprintf(name, sizeof(name), "opp-microvolt-%s",
1125 dev_opp->prop_name);
1126 prop = of_find_property(opp->np, name, NULL);
1130 /* Search for "opp-microvolt" */
1131 sprintf(name, "opp-microvolt");
1132 prop = of_find_property(opp->np, name, NULL);
1134 /* Missing property isn't a problem, but an invalid entry is */
1139 count = of_property_count_u32_elems(opp->np, name);
1141 dev_err(dev, "%s: Invalid %s property (%d)\n",
1142 __func__, name, count);
1146 /* There can be one or three elements here */
1147 if (count != 1 && count != 3) {
1148 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
1149 __func__, name, count);
1153 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
1155 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
/* Triplet order in DT is <target min max>. */
1159 opp->u_volt = microvolt[0];
1160 opp->u_volt_min = microvolt[1];
1161 opp->u_volt_max = microvolt[2];
1163 /* Search for "opp-microamp-<name>" */
1165 if (dev_opp->prop_name) {
1166 snprintf(name, sizeof(name), "opp-microamp-%s",
1167 dev_opp->prop_name);
1168 prop = of_find_property(opp->np, name, NULL);
1172 /* Search for "opp-microamp" */
1173 sprintf(name, "opp-microamp");
1174 prop = of_find_property(opp->np, name, NULL);
1177 if (prop && !of_property_read_u32(opp->np, name, &val))
1184 * dev_pm_opp_set_supported_hw() - Set supported platforms
1185 * @dev: Device for which supported-hw has to be set.
1186 * @versions: Array of hierarchy of versions to match.
1187 * @count: Number of elements in the array.
1189 * This is required only for the V2 bindings, and it enables a platform to
1190 * specify the hierarchy of versions it supports. OPP layer will then enable
1191 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1194 * Locking: The internal device_opp and opp structures are RCU protected.
1195 * Hence this function internally uses RCU updater strategy with mutex locks
1196 * to keep the integrity of the internal data structures. Callers should ensure
1197 * that this function is *NOT* called under RCU protection or in contexts where
1198 * mutex cannot be locked.
1200 int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1203 struct device_opp *dev_opp;
1206 /* Hold our list modification lock here */
1207 mutex_lock(&dev_opp_list_lock);
1209 dev_opp = _add_device_opp(dev);
1215 /* Make sure there are no concurrent readers while updating dev_opp */
1216 WARN_ON(!list_empty(&dev_opp->opp_list));
1218 /* Do we already have a version hierarchy associated with dev_opp? */
1219 if (dev_opp->supported_hw) {
1220 dev_err(dev, "%s: Already have supported hardware list\n",
/* Take a private copy; the caller's array may not outlive the table. */
1226 dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
1228 if (!dev_opp->supported_hw) {
1233 dev_opp->supported_hw_count = count;
1234 mutex_unlock(&dev_opp_list_lock);
/* Error path: drop the table if we were the only reason it existed. */
1238 _remove_device_opp(dev_opp);
1240 mutex_unlock(&dev_opp_list_lock);
1244 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1247 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1248 * @dev: Device for which supported-hw has to be set.
1250 * This is required only for the V2 bindings, and is called for a matching
1251 * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
1252 * will not be freed.
1254 * Locking: The internal device_opp and opp structures are RCU protected.
1255 * Hence this function internally uses RCU updater strategy with mutex locks
1256 * to keep the integrity of the internal data structures. Callers should ensure
1257 * that this function is *NOT* called under RCU protection or in contexts where
1258 * mutex cannot be locked.
1260 void dev_pm_opp_put_supported_hw(struct device *dev)
1262 struct device_opp *dev_opp;
1264 /* Hold our list modification lock here */
1265 mutex_lock(&dev_opp_list_lock);
1267 /* Check for existing list for 'dev' first */
1268 dev_opp = _find_device_opp(dev);
1269 if (IS_ERR(dev_opp)) {
1270 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1274 /* Make sure there are no concurrent readers while updating dev_opp */
1275 WARN_ON(!list_empty(&dev_opp->opp_list));
1277 if (!dev_opp->supported_hw) {
1278 dev_err(dev, "%s: Doesn't have supported hardware list\n",
/* Release the copy taken by dev_pm_opp_set_supported_hw(). */
1283 kfree(dev_opp->supported_hw);
1284 dev_opp->supported_hw = NULL;
1285 dev_opp->supported_hw_count = 0;
1287 /* Try freeing device_opp if this was the last blocking resource */
1288 _remove_device_opp(dev_opp);
1291 mutex_unlock(&dev_opp_list_lock);
1293 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1296 * dev_pm_opp_set_prop_name() - Set prop-extn name
1297 * @dev: Device for which the prop-name has to be set.
1298 * @name: name to postfix to properties.
1300 * This is required only for the V2 bindings, and it enables a platform to
1301 * specify the extn to be used for certain property names. The properties to
1302 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1303 * should postfix the property name with -<name> while looking for them.
1305 * Locking: The internal device_opp and opp structures are RCU protected.
1306 * Hence this function internally uses RCU updater strategy with mutex locks
1307 * to keep the integrity of the internal data structures. Callers should ensure
1308 * that this function is *NOT* called under RCU protection or in contexts where
1309 * mutex cannot be locked.
1311 int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1313 struct device_opp *dev_opp;
1316 /* Hold our list modification lock here */
1317 mutex_lock(&dev_opp_list_lock);
/* NOTE(review): presumably find-or-create; the error check on the return
 * value is elided from this listing. */
1319 dev_opp = _add_device_opp(dev);
1325 /* Make sure there are no concurrent readers while updating dev_opp */
1326 WARN_ON(!list_empty(&dev_opp->opp_list));
1328 /* Do we already have a prop-name associated with dev_opp? */
1329 if (dev_opp->prop_name) {
1330 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1331 dev_opp->prop_name);
/* Duplicate the name; the copy is freed in dev_pm_opp_put_prop_name(). */
1336 dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
1337 if (!dev_opp->prop_name) {
1342 mutex_unlock(&dev_opp_list_lock);
/* Error path: release the device_opp if it holds nothing else. */
1346 _remove_device_opp(dev_opp);
1348 mutex_unlock(&dev_opp_list_lock);
1352 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1355 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1356 * @dev: Device for which the prop-name was set.
1358 * This is required only for the V2 bindings, and is called for a matching
1359 * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
1360 * will not be freed.
1362 * Locking: The internal device_opp and opp structures are RCU protected.
1363 * Hence this function internally uses RCU updater strategy with mutex locks
1364 * to keep the integrity of the internal data structures. Callers should ensure
1365 * that this function is *NOT* called under RCU protection or in contexts where
1366 * mutex cannot be locked.
1368 void dev_pm_opp_put_prop_name(struct device *dev)
1370 struct device_opp *dev_opp;
1372 /* Hold our list modification lock here */
1373 mutex_lock(&dev_opp_list_lock);
1375 /* Check for existing list for 'dev' first */
1376 dev_opp = _find_device_opp(dev);
1377 if (IS_ERR(dev_opp)) {
1378 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1382 /* Make sure there are no concurrent readers while updating dev_opp */
1383 WARN_ON(!list_empty(&dev_opp->opp_list));
1385 if (!dev_opp->prop_name) {
1386 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
/* Free the kstrdup()'d copy taken in dev_pm_opp_set_prop_name(). */
1390 kfree(dev_opp->prop_name);
1391 dev_opp->prop_name = NULL;
1393 /* Try freeing device_opp if this was the last blocking resource */
1394 _remove_device_opp(dev_opp);
1397 mutex_unlock(&dev_opp_list_lock);
1399 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1402 * dev_pm_opp_set_regulator() - Set regulator name for the device
1403 * @dev: Device for which regulator name is being set.
1404 * @name: Name of the regulator.
1406 * In order to support OPP switching, OPP layer needs to know the name of the
1407 * device's regulator, as the core would be required to switch voltages as well.
1409 * This must be called before any OPPs are initialized for the device.
1411 * Locking: The internal device_opp and opp structures are RCU protected.
1412 * Hence this function internally uses RCU updater strategy with mutex locks
1413 * to keep the integrity of the internal data structures. Callers should ensure
1414 * that this function is *NOT* called under RCU protection or in contexts where
1415 * mutex cannot be locked.
1417 int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1419 struct device_opp *dev_opp;
1420 struct regulator *reg;
1423 mutex_lock(&dev_opp_list_lock);
/* NOTE(review): presumably find-or-create; error check elided in listing. */
1425 dev_opp = _add_device_opp(dev);
1431 /* This should be called before OPPs are initialized */
1432 if (WARN_ON(!list_empty(&dev_opp->opp_list))) {
1437 /* Already have a regulator set */
1438 if (WARN_ON(!IS_ERR_OR_NULL(dev_opp->regulator))) {
1442 /* Allocate the regulator */
1443 reg = regulator_get_optional(dev, name);
/* -EPROBE_DEFER is an expected transient: stay quiet so the caller can
 * retry later without log noise. */
1446 if (ret != -EPROBE_DEFER)
1447 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1448 __func__, name, ret);
/* Hand ownership of the regulator reference to dev_opp; it is released
 * via regulator_put() in dev_pm_opp_put_regulator(). */
1452 dev_opp->regulator = reg;
1454 mutex_unlock(&dev_opp_list_lock);
1458 _remove_device_opp(dev_opp);
1460 mutex_unlock(&dev_opp_list_lock);
1464 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
1467 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1468 * @dev: Device for which regulator was set.
1470 * Locking: The internal device_opp and opp structures are RCU protected.
1471 * Hence this function internally uses RCU updater strategy with mutex locks
1472 * to keep the integrity of the internal data structures. Callers should ensure
1473 * that this function is *NOT* called under RCU protection or in contexts where
1474 * mutex cannot be locked.
1476 void dev_pm_opp_put_regulator(struct device *dev)
1478 struct device_opp *dev_opp;
1480 mutex_lock(&dev_opp_list_lock);
1482 /* Check for existing list for 'dev' first */
1483 dev_opp = _find_device_opp(dev);
1484 if (IS_ERR(dev_opp)) {
1485 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
/* ERR/NULL means dev_pm_opp_set_regulator() was never (successfully)
 * called for this device — nothing to release. */
1489 if (IS_ERR_OR_NULL(dev_opp->regulator)) {
1490 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1494 /* Make sure there are no concurrent readers while updating dev_opp */
1495 WARN_ON(!list_empty(&dev_opp->opp_list));
/* Drop our regulator reference and poison the field so a second put (or
 * a use without set) is caught by the IS_ERR_OR_NULL() check above. */
1497 regulator_put(dev_opp->regulator);
1498 dev_opp->regulator = ERR_PTR(-EINVAL);
1500 /* Try freeing device_opp if this was the last blocking resource */
1501 _remove_device_opp(dev_opp);
1504 mutex_unlock(&dev_opp_list_lock);
1506 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
/*
 * _opp_is_supported() - decide whether an OPP node applies to this hardware.
 * Compares the DT node's "opp-supported-hw" bitmasks against the version
 * hierarchy registered via dev_pm_opp_set_supported_hw().
 *
 * NOTE(review): the iteration over hierarchy levels and the return
 * statements are elided from this listing — confirm against full source.
 */
1508 static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
1509 struct device_node *np)
1511 unsigned int count = dev_opp->supported_hw_count;
/* No version hierarchy registered: every OPP is considered supported. */
1515 if (!dev_opp->supported_hw)
1519 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1522 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1523 __func__, count, ret);
1527 /* Both of these are bitwise masks of the versions */
1528 if (!(version & dev_opp->supported_hw[count]))
1536 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1537 * @dev: device for which we do this operation
1540 * This function adds an opp definition to the opp list and returns status. The
1541 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1542 * removed by dev_pm_opp_remove.
1544 * Locking: The internal device_opp and opp structures are RCU protected.
1545 * Hence this function internally uses RCU updater strategy with mutex locks
1546 * to keep the integrity of the internal data structures. Callers should ensure
1547 * that this function is *NOT* called under RCU protection or in contexts where
1548 * mutex cannot be locked.
1552 * Duplicate OPPs (both freq and volt are same) and opp->available
1553 * -EEXIST Freq are same and volt are different OR
1554 * Duplicate OPPs (both freq and volt are same) and !opp->available
1555 * -ENOMEM Memory allocation failure
1556 * -EINVAL Failed parsing the OPP node
1558 static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1560 struct device_opp *dev_opp;
1561 struct dev_pm_opp *new_opp;
1566 /* Hold our list modification lock here */
1567 mutex_lock(&dev_opp_list_lock);
/* NOTE(review): allocates new_opp and looks up/creates dev_opp; the error
 * check on the return value is elided from this listing. */
1569 new_opp = _allocate_opp(dev, &dev_opp);
/* "opp-hz" is mandatory in the v2 binding; bail out if it is missing. */
1575 ret = of_property_read_u64(np, "opp-hz", &rate);
1577 dev_err(dev, "%s: opp-hz not found\n", __func__);
1581 /* Check if the OPP supports hardware's hierarchy of versions or not */
1582 if (!_opp_is_supported(dev, dev_opp, np)) {
1583 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1588 * Rate is defined as an unsigned long in clk API, and so casting
1589 * explicitly to its type. Must be fixed once rate is 64 bit
1590 * guaranteed in clk API.
1592 new_opp->rate = (unsigned long)rate;
1593 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
/* Static (DT-defined) OPPs are not dynamic and start out available. */
1596 new_opp->dynamic = false;
1597 new_opp->available = true;
1599 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1600 new_opp->clock_latency_ns = val;
1602 ret = opp_parse_supplies(new_opp, dev, dev_opp);
1606 ret = _opp_add(dev, new_opp, dev_opp);
1610 /* OPP to select on device suspend */
1611 if (of_property_read_bool(np, "opp-suspend")) {
/* Only one suspend OPP makes sense; warn if the DT declares several. */
1612 if (dev_opp->suspend_opp) {
1613 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1614 __func__, dev_opp->suspend_opp->rate,
1617 new_opp->suspend = true;
1618 dev_opp->suspend_opp = new_opp;
/* Track the worst-case clock transition latency across all OPPs. */
1622 if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
1623 dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
1625 mutex_unlock(&dev_opp_list_lock);
1627 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
1628 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
1629 new_opp->u_volt_min, new_opp->u_volt_max,
1630 new_opp->clock_latency_ns);
1633 * Notify the changes in the availability of the operable
1634 * frequency/voltage list.
1636 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
/* Error path: free the partially initialised OPP (not yet published). */
1640 _opp_remove(dev_opp, new_opp, false);
1642 mutex_unlock(&dev_opp_list_lock);
1647 * dev_pm_opp_add() - Add an OPP table from a table definitions
1648 * @dev: device for which we do this operation
1649 * @freq: Frequency in Hz for this OPP
1650 * @u_volt: Voltage in uVolts for this OPP
1652 * This function adds an opp definition to the opp list and returns status.
1653 * The opp is made available by default and it can be controlled using
1654 * dev_pm_opp_enable/disable functions.
1656 * Locking: The internal device_opp and opp structures are RCU protected.
1657 * Hence this function internally uses RCU updater strategy with mutex locks
1658 * to keep the integrity of the internal data structures. Callers should ensure
1659 * that this function is *NOT* called under RCU protection or in contexts where
1660 * mutex cannot be locked.
1664 * Duplicate OPPs (both freq and volt are same) and opp->available
1665 * -EEXIST Freq are same and volt are different OR
1666 * Duplicate OPPs (both freq and volt are same) and !opp->available
1667 * -ENOMEM Memory allocation failure
1669 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1671 return _opp_add_v1(dev, freq, u_volt, true);
1673 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1676 * _opp_set_availability() - helper to set the availability of an opp
1677 * @dev: device for which we do this operation
1678 * @freq: OPP frequency to modify availability
1679 * @availability_req: availability status requested for this opp
1681 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1682 * share a common logic which is isolated here.
1684 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1685 * copy operation, returns 0 if no modification was done OR modification was
1688 * Locking: The internal device_opp and opp structures are RCU protected.
1689 * Hence this function internally uses RCU updater strategy with mutex locks to
1690 * keep the integrity of the internal data structures. Callers should ensure
1691 * that this function is *NOT* called under RCU protection or in contexts where
1692 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1694 static int _opp_set_availability(struct device *dev, unsigned long freq,
1695 bool availability_req)
1697 struct device_opp *dev_opp;
1698 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1701 /* keep the node allocated */
/* Allocate the replacement node before taking the lock so the locked
 * section never has to sleep on the allocator. */
1702 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1706 mutex_lock(&dev_opp_list_lock);
1708 /* Find the device_opp */
1709 dev_opp = _find_device_opp(dev);
1710 if (IS_ERR(dev_opp)) {
1711 r = PTR_ERR(dev_opp);
1712 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1716 /* Do we have the frequency? */
1717 list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
1718 if (tmp_opp->rate == freq) {
1728 /* Is update really needed? */
1729 if (opp->available == availability_req)
1731 /* copy the old data over */
1734 /* plug in new node */
1735 new_opp->available = availability_req;
/* Publish the updated copy to RCU readers, then free the old node only
 * after a grace period via call_srcu(). */
1737 list_replace_rcu(&opp->node, &new_opp->node);
1738 mutex_unlock(&dev_opp_list_lock);
1739 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1741 /* Notify the change of the OPP availability */
1742 if (availability_req)
1743 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
1746 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
1752 mutex_unlock(&dev_opp_list_lock);
1758 * dev_pm_opp_enable() - Enable a specific OPP
1759 * @dev: device for which we do this operation
1760 * @freq: OPP frequency to enable
1762 * Enables a provided opp. If the operation is valid, this returns 0, else the
1763 * corresponding error value. It is meant to be used for users an OPP available
1764 * after being temporarily made unavailable with dev_pm_opp_disable.
1766 * Locking: The internal device_opp and opp structures are RCU protected.
1767 * Hence this function indirectly uses RCU and mutex locks to keep the
1768 * integrity of the internal data structures. Callers should ensure that
1769 * this function is *NOT* called under RCU protection or in contexts where
1770 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1772 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1773 * copy operation, returns 0 if no modification was done OR modification was
1776 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1778 return _opp_set_availability(dev, freq, true);
1780 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1783 * dev_pm_opp_disable() - Disable a specific OPP
1784 * @dev: device for which we do this operation
1785 * @freq: OPP frequency to disable
1787 * Disables a provided opp. If the operation is valid, this returns
1788 * 0, else the corresponding error value. It is meant to be a temporary
1789 * control by users to make this OPP not available until the circumstances are
1790 * right to make it available again (with a call to dev_pm_opp_enable).
1792 * Locking: The internal device_opp and opp structures are RCU protected.
1793 * Hence this function indirectly uses RCU and mutex locks to keep the
1794 * integrity of the internal data structures. Callers should ensure that
1795 * this function is *NOT* called under RCU protection or in contexts where
1796 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1798 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1799 * copy operation, returns 0 if no modification was done OR modification was
1802 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1804 return _opp_set_availability(dev, freq, false);
1806 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1809 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1810 * @dev: device pointer used to lookup device OPPs.
1812 * Return: pointer to notifier head if found, otherwise -ENODEV or
1813 * -EINVAL based on type of error casted as pointer. value must be checked
1814 * with IS_ERR to determine valid pointer or error result.
1816 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
1817 * protected pointer. The reason for the same is that the opp pointer which is
1818 * returned will remain valid for use with opp_get_{voltage, freq} only while
1819 * under the locked area. The pointer returned must be used prior to unlocking
1820 * with rcu_read_unlock() to maintain the integrity of the pointer.
1822 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1824 struct device_opp *dev_opp = _find_device_opp(dev);
1826 if (IS_ERR(dev_opp))
1827 return ERR_CAST(dev_opp); /* matching type */
1829 return &dev_opp->srcu_head;
1831 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1835 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1837 * @dev: device pointer used to lookup device OPPs.
1839 * Free OPPs created using static entries present in DT.
1841 * Locking: The internal device_opp and opp structures are RCU protected.
1842 * Hence this function indirectly uses RCU updater strategy with mutex locks
1843 * to keep the integrity of the internal data structures. Callers should ensure
1844 * that this function is *NOT* called under RCU protection or in contexts where
1845 * mutex cannot be locked.
1847 void dev_pm_opp_of_remove_table(struct device *dev)
1849 struct device_opp *dev_opp;
1850 struct dev_pm_opp *opp, *tmp;
1852 /* Hold our list modification lock here */
1853 mutex_lock(&dev_opp_list_lock);
1855 /* Check for existing list for 'dev' */
1856 dev_opp = _find_device_opp(dev);
1857 if (IS_ERR(dev_opp)) {
1858 int error = PTR_ERR(dev_opp);
/* -ENODEV simply means no table was ever registered; anything else is
 * unexpected and worth a WARN. */
1860 if (error != -ENODEV)
1861 WARN(1, "%s: dev_opp: %d\n",
1862 IS_ERR_OR_NULL(dev) ?
1863 "Invalid device" : dev_name(dev),
1868 /* Find if dev_opp manages a single device */
1869 if (list_is_singular(&dev_opp->dev_list)) {
1870 /* Free static OPPs */
1871 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1873 _opp_remove(dev_opp, opp, true);
/* Shared table: only detach this device, leave the OPPs for the others. */
1876 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
1880 mutex_unlock(&dev_opp_list_lock);
1882 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
1884 /* Returns opp descriptor node for a device, caller must do of_node_put() */
1885 struct device_node *_of_get_opp_desc_node(struct device *dev)
1888 * TODO: Support for multiple OPP tables.
1890 * There should be only ONE phandle present in "operating-points-v2"
1894 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1897 /* Initializes OPP tables based on new bindings */
1898 static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1900 struct device_node *np;
1901 struct device_opp *dev_opp;
1902 int ret = 0, count = 0;
1904 mutex_lock(&dev_opp_list_lock);
/* If another device already registered this (shared) table, just attach
 * this device to it instead of re-parsing the OPP nodes. */
1906 dev_opp = _managed_opp(opp_np);
1908 /* OPPs are already managed */
1909 if (!_add_list_dev(dev, dev_opp))
1911 mutex_unlock(&dev_opp_list_lock);
1914 mutex_unlock(&dev_opp_list_lock);
1916 /* We have opp-list node now, iterate over it and add OPPs */
1917 for_each_available_child_of_node(opp_np, np) {
1920 ret = _opp_add_static_v2(dev, np);
1922 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1928 /* There should be one of more OPP defined */
1929 if (WARN_ON(!count))
1932 mutex_lock(&dev_opp_list_lock);
1934 dev_opp = _find_device_opp(dev);
1935 if (WARN_ON(IS_ERR(dev_opp))) {
1936 ret = PTR_ERR(dev_opp);
1937 mutex_unlock(&dev_opp_list_lock);
/* Record the table node and whether its OPPs are shared across devices,
 * so later lookups via _managed_opp() can reuse this dev_opp. */
1941 dev_opp->np = opp_np;
1942 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1944 mutex_unlock(&dev_opp_list_lock);
/* Error path: tear down whatever OPPs were added before the failure. */
1949 dev_pm_opp_of_remove_table(dev);
1954 /* Initializes OPP tables based on old-deprecated bindings */
1955 static int _of_add_opp_table_v1(struct device *dev)
1957 const struct property *prop;
1961 prop = of_find_property(dev->of_node, "operating-points", NULL);
1968 * Each OPP is a set of tuples consisting of frequency and
1969 * voltage like <freq-kHz vol-uV>.
/* The flat property is a u32 array; a valid table therefore has an even
 * number of cells (freq/volt pairs). */
1971 nr = prop->length / sizeof(u32);
1973 dev_err(dev, "%s: Invalid OPP list\n", __func__);
/* DT stores freq in kHz; the OPP core works in Hz, hence the * 1000. */
1979 unsigned long freq = be32_to_cpup(val++) * 1000;
1980 unsigned long volt = be32_to_cpup(val++);
/* Static table entries are non-dynamic (last argument = false). A failed
 * entry is only warned about; the remaining entries are still tried. */
1982 if (_opp_add_v1(dev, freq, volt, false))
1983 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1992 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
1993 * @dev: device pointer used to lookup device OPPs.
1995 * Register the initial OPP table with the OPP library for given device.
1997 * Locking: The internal device_opp and opp structures are RCU protected.
1998 * Hence this function indirectly uses RCU updater strategy with mutex locks
1999 * to keep the integrity of the internal data structures. Callers should ensure
2000 * that this function is *NOT* called under RCU protection or in contexts where
2001 * mutex cannot be locked.
2005 * Duplicate OPPs (both freq and volt are same) and opp->available
2006 * -EEXIST Freq are same and volt are different OR
2007 * Duplicate OPPs (both freq and volt are same) and !opp->available
2008 * -ENOMEM Memory allocation failure
2009 * -ENODEV when 'operating-points' property is not found or is invalid data
2011 * -ENODATA when empty 'operating-points' property is found
2012 * -EINVAL when invalid entries are found in opp-v2 table
2014 int dev_pm_opp_of_add_table(struct device *dev)
2016 struct device_node *opp_np;
2020 * OPPs have two version of bindings now. The older one is deprecated,
2021 * try for the new binding first.
2023 opp_np = _of_get_opp_desc_node(dev);
2026 * Try old-deprecated bindings for backward compatibility with
2029 return _of_add_opp_table_v1(dev);
/* v2 node found: parse it, then drop the reference taken by
 * _of_get_opp_desc_node() (of_parse_phandle gets a node ref). */
2032 ret = _of_add_opp_table_v2(dev, opp_np);
2033 of_node_put(opp_np);
2037 EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);