/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */
#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
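/*
 * Both locks below are reentrant for the task that already holds them: if
 * the trylock fails but the owner is current, only the refcount is bumped.
 * This lets the framework call back into itself (e.g. a .set_rate op that
 * enables a parent clock) without deadlocking.
 */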
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}
static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}
static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}
static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;
static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_get_rate(c));
	seq_printf(s, "\n");
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
				     int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}
static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk *c;

	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
	seq_printf(s, "---------------------------------------------------------------------\n");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	hlist_for_each_entry(c, &clk_orphan_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}
static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
}
static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}
static int clk_dump(struct seq_file *s, void *data)
{
	struct clk *c;
	bool first_node = true;

	seq_printf(s, "{");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node) {
		if (!first_node)
			seq_printf(s, ",");
		first_node = false;
		clk_dump_subtree(s, c, 0);
	}

	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, c, 0);
	}

	clk_prepare_unlock();

	seq_printf(s, "}");
	return 0;
}
static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove(clk->dentry);

out:
	return ret;
}
/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);
	if (ret)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;

out:
	return ret;
}
/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far) so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs.
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}
/**
 * clk_debug_reparent - reparent clk node in the debugfs clk tree
 * @clk: the clk being reparented
 * @new_parent: the new clk parent, may be NULL
 *
 * Rename clk entry in the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.
 */
static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
	struct dentry *d;
	struct dentry *new_parent_d;

	if (!inited)
		return;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
}
/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);
	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);
	if (!orphandir)
		return -ENOMEM;

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	clk_prepare_unlock();

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
#endif
/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
	struct clk *child;

	if (!clk)
		return;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (__clk_is_prepared(clk)) {
		if (clk->ops->unprepare_unused)
			clk->ops->unprepare_unused(clk->hw);
		else if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
	}
}
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (__clk_is_enabled(clk)) {
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
	}

unlock_out:
	clk_enable_unlock(flags);

out:
	return;
}
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
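/*
 * Booting with "clk_ignore_unused" on the kernel command line therefore
 * skips the gating pass below, which is useful when debugging a clock that
 * is left running by the bootloader but not yet claimed by any driver.
 */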
static int clk_disable_unused(void)
{
	struct clk *clk;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);
/***    helper functions   ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);
struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}
u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}
struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	if (!clk || index >= clk->num_parents)
		return NULL;
	else if (!clk->parents)
		return __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		return clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		return clk->parents[index];
}
unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}
unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}
unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}
unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);
bool __clk_is_prepared(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}
bool __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}
static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}
struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}
/*
 * Helper for finding the best parent to provide a given frequency.  This can
 * be used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *best_parent_rate,
			      struct clk **best_parent_p)
{
	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = clk->parent;
		if (clk->flags & CLK_SET_RATE_PARENT)
			best = __clk_round_rate(parent, rate);
		else if (parent)
			best = __clk_get_rate(parent);
		else
			best = __clk_get_rate(clk);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = clk->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_get_parent_by_index(clk, i);
		if (!parent)
			continue;
		if (clk->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_round_rate(parent, rate);
		else
			parent_rate = __clk_get_rate(parent);
		if (parent_rate <= rate && parent_rate > best) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
		*best_parent_p = best_parent;
	*best_parent_rate = best;

	return best;
}
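/*
 * Illustrative sketch only (the "foo" names are hypothetical): a simple mux
 * driver can plug this helper straight into its ops and let the framework
 * pick the best parent for a requested rate:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */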
void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}
/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  For this reason, clk_unprepare and clk_disable are not
 * mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	clk_prepare_lock();
	__clk_unprepare(clk);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}
/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * For this reason, clk_prepare and clk_enable are not mutually exclusive.
 * In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	clk_prepare_lock();
	ret = __clk_prepare(clk);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(IS_ERR(clk)))
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}
/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  For
 * this reason, clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}
/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  For this reason, clk_enable
 * and clk_prepare are not mutually exclusive.  In fact clk_prepare must be
 * called before clk_enable.  Returns 0 on success, -EERROR otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
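/*
 * Illustrative consumer-side sketch (the "uart" con_id is hypothetical).
 * The sleepable and atomic halves must be paired in this order and undone
 * in reverse:
 *
 *	clk = clk_get(dev, "uart");
 *	ret = clk_prepare(clk);		(may sleep)
 *	if (!ret)
 *		ret = clk_enable(clk);	(atomic)
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 *	clk_put(clk);
 */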
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;
	struct clk *parent;

	if (!clk)
		return 0;

	parent = clk->parent;
	if (parent)
		parent_rate = parent->rate;

	if (clk->ops->determine_rate)
		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
						&parent);
	else if (clk->ops->round_rate)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else if (clk->flags & CLK_SET_RATE_PARENT)
		return __clk_round_rate(clk->parent, rate);
	else
		return clk->rate;
}
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
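/*
 * Illustrative sketch (hypothetical consumer code): callers that cannot
 * tolerate an approximation typically round first and compare before
 * committing to the new rate:
 *
 *	rounded = clk_round_rate(clk, 48000000);
 *	if (rounded == 48000000)
 *		ret = clk_set_rate(clk, rounded);
 */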
/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}
/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}
/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, in which case a recalc_rate is issued first.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	u8 i;

	if (!clk->parents)
		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
								GFP_KERNEL);

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents)
				clk->parents[i] = __clk_lookup(parent->name);
			break;
		}
	}

	return i;
}
static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}

	/* update debugfs with new clk tree topology */
	clk_debug_reparent(clk, parent);
	return 0;
}
/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
			     struct clk *new_parent, u8 p_index)
{
	struct clk *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	struct clk *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	u8 p_index = 0;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		new_rate = clk->ops->determine_rate(clk->hw, rate,
						    &best_parent_rate,
						    &parent);
	} else if (clk->ops->round_rate) {
		new_rate = clk->ops->round_rate(clk->hw, rate,
						&best_parent_rate);
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index == clk->num_parents) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			return NULL;
		}
	}

	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}
/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * clear all rates.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct clk *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;

	old_rate = clk->rate;

	/* set parent */
	if (clk->new_parent && clk->new_parent != clk->parent)
		__clk_set_parent(clk, clk->new_parent, clk->new_parent_index);

	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/* bail early if nothing to do */
	if (rate == clk_get_rate(clk))
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
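/*
 * Illustrative sketch (the "pll"/"div" names are hypothetical): with
 * CLK_SET_RATE_PARENT set on the divider in a chain
 *
 *	pll --> div (CLK_SET_RATE_PARENT) --> consumer
 *
 * a call such as clk_set_rate(div_clk, 100000000) may retune the PLL first
 * and then the divider, with PRE_RATE_CHANGE/POST_RATE_CHANGE notifications
 * fired across the affected subtree as described above.
 */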
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kzalloc((sizeof(struct clk*) * clk->num_parents),
					GFP_KERNEL);

	ret = clk_get_parent_by_index(clk, index);

out:
	return ret;
}
void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
	clk_reparent(clk, new_parent);
	clk_debug_reparent(clk, new_parent);
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call.  If
 * that's not acceptable for a specific clk (Eg: the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, debugfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;
	u8 p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	if (!clk->ops)
		return -EINVAL;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index == clk->num_parents) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, clk->name);
			ret = -EINVAL;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate recalculation accordingly */
	if (ret)
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	else
		__clk_recalc_rates(clk, POST_RATE_CHANGE);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
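/*
 * Illustrative consumer sketch (hypothetical clock handles): switching a
 * peripheral between a PLL output and the crystal oscillator:
 *
 *	ret = clk_set_parent(mux_clk, xtal_clk);
 */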
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp2;

	if (!clk)
		return -EINVAL;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
	      clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				__clk_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	clk_prepare_unlock();

	return ret;
}
/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns 0 on success, otherwise an error code.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);
static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
{
	int i, ret;

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
			GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return 0;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	return ret;
}
/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in conjunction
 * with the rest of the clock API.  In the event of an error clk_register
 * will return an error code; drivers must test for an error code after
 * calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	ret = _clk_register(dev, hw, clk);
	if (!ret)
		return clk;

	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
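/*
 * Illustrative provider-side sketch (all "foo" names are hypothetical): a
 * driver typically fills a struct clk_init_data, embeds a struct clk_hw in
 * its own state, and registers it:
 *
 *	static const char *foo_parents[] = { "osc", "pll" };
 *
 *	struct clk_init_data init = {
 *		.name		= "foo",
 *		.ops		= &foo_ops,
 *		.parent_names	= foo_parents,
 *		.num_parents	= ARRAY_SIZE(foo_parents),
 *	};
 *	foo->hw.init = &init;
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */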
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);
static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(res);
}
/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register().  Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach.  See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	int ret;

	clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	ret = _clk_register(dev, hw, clk);
	if (!ret) {
		devres_add(dev, clk);
	} else {
		devres_free(clk);
		clk = ERR_PTR(ret);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;
	if (WARN_ON(!c))
		return 0;
	return c == data;
}
/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register().  Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this will cause a nested prepare_lock mutex.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
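/*
 * Illustrative sketch (the "foo" names are hypothetical): a consumer that
 * must quiesce hardware around rate changes registers a callback such as:
 *
 *	static int foo_clk_cb(struct notifier_block *nb,
 *			      unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			foo_quiesce(ndata->old_rate, ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	clk_notifier_register(clk, &foo_nb);
 */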
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}
	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
#ifdef CONFIG_OF

/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};
extern struct of_device_id __clk_of_table[];

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_lock);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_lock);
	pr_debug("Added clock from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
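/*
 * Illustrative sketch (hypothetical "foo" driver): a provider exposing
 * several clocks usually pairs this with of_clk_src_onecell_get:
 *
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks		= foo_clks,
 *		.clk_num	= ARRAY_SIZE(foo_clks),
 *	};
 *
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 *
 * Consumers then reference "clocks = <&foo_node 3>;" in the device tree and
 * the index in the specifier selects the clk from foo_clks.
 */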
/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_lock);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_lock);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-ENOENT);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_lock);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk))
			break;
	}
	mutex_unlock(&of_clk_lock);

	return clk;
}
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	const char *clk_name;
	int rc;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  clkspec.args_count ? clkspec.args[0] : 0,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	struct device_node *np;

	if (!matches)
		matches = __clk_of_table;

	for_each_matching_node(np, matches) {
		const struct of_device_id *match = of_match_node(matches, np);
		of_clk_init_cb_t clk_init_cb = match->data;
		clk_init_cb(np);
	}
}
#endif