// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
 */

#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    private data structures    ***/
	const struct clk_ops	*ops;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

	struct clk_core *core;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
static int clk_pm_runtime_get(struct clk_core *core)
	if (!core->rpm_enabled)

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;

static void clk_pm_runtime_put(struct clk_core *core)
	if (!core->rpm_enabled)

	pm_runtime_put_sync(core->dev);
static void clk_prepare_lock(void)
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
		mutex_lock(&prepare_lock);
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;

static void clk_prepare_unlock(void)
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
		spin_lock_irqsave(&enable_lock, flags);
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
	spin_unlock_irqrestore(&enable_lock, flags);
static bool clk_core_rate_is_protected(struct clk_core *core)
	return core->protect_count;

static bool clk_core_is_prepared(struct clk_core *core)
	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);

static bool clk_core_is_enabled(struct clk_core *core)
	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if the clock controller's device is runtime active before
	 * calling the .is_enabled callback. If not, assume that the clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of the controller before
	 * taking the enable spinlock, but the check below is needed if one
	 * tries to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {

	ret = core->ops->is_enabled(core->hw);

	if (core->rpm_enabled)
		pm_runtime_put(core->dev);
/***  helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
	return !clk ? NULL : clk->core->name;
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
	return hw->core->name;
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
	return !clk ? NULL : clk->core->hw;
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
	return hw->core->num_parents;
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
	return hw->core->parent ? hw->core->parent->hw : NULL;
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);

static struct clk_core *clk_core_lookup(const char *name)
	struct clk_core *root_clk;
	struct clk_core *ret;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
	if (!core || index >= core->num_parents)

	if (!core->parents[index])
		core->parents[index] =
			clk_core_lookup(core->parent_names[index]);

	return core->parents[index];

clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
	return !clk ? 0 : clk->core->enable_count;

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
	if (!core->num_parents)

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
	return clk_core_get_rate_nolock(hw->core);
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
	return core->accuracy;

unsigned long __clk_get_flags(struct clk *clk)
	return !clk ? 0 : clk->core->flags;
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
	return hw->core->flags;
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
	return clk_core_is_prepared(hw->core);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
	return clk_core_rate_is_protected(hw->core);

bool clk_hw_is_enabled(const struct clk_hw *hw)
	return clk_core_is_enabled(hw->core);

bool __clk_is_enabled(struct clk *clk)
	return clk_core_is_enabled(clk->core);
EXPORT_SYMBOL_GPL(__clk_is_enabled);
static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
			best = parent_req.rate;
			best = clk_core_get_rate_nolock(parent);
			best = clk_core_get_rate_nolock(core);

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);

		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent->hw, &parent_req);
			parent_req.rate = clk_core_get_rate_nolock(parent);

		if (mux_is_better_rate(req->rate, parent_req.rate,
			best_parent = parent;
			best = parent_req.rate;

	req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;

EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
/*
 * Helper for finding the best parent to provide a given frequency. This can
 * be used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
	return clk_mux_determine_rate_flags(hw, req, 0);
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
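
/*
 * Typical provider-side use (an illustrative sketch, not code from this
 * file): a mux driver can point its .determine_rate at one of these helpers
 * directly. The ops table and callback names below are made up.
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,
 *		.set_parent	= my_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */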
static void clk_core_rate_unprotect(struct clk_core *core)
	lockdep_assert_held(&prepare_lock);

	if (WARN(core->protect_count == 0,
		 "%s already unprotected\n", core->name))

	if (--core->protect_count > 0)

	clk_core_rate_unprotect(core->parent);

static int clk_core_rate_nuke_protect(struct clk_core *core)
	lockdep_assert_held(&prepare_lock);

	if (core->protect_count == 0)

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);
/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also come under the exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked, as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
 * return error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
	/*
	 * If there is something wrong with this consumer's protect count, stop
	 * here before messing with the provider.
	 */
	if (WARN_ON(clk->exclusive_count <= 0))

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;

	clk_prepare_unlock();
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
static void clk_core_rate_protect(struct clk_core *core)
	lockdep_assert_held(&prepare_lock);

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
	lockdep_assert_held(&prepare_lock);

	clk_core_rate_protect(core);
	core->protect_count = count;
/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also come under the exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked, as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
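
/*
 * Illustrative consumer sketch (not from this file; "mydev" and the 100 MHz
 * figure are placeholders): bracket the section that cannot tolerate a rate
 * glitch with the balanced get/put pair.
 *
 *	ret = clk_rate_exclusive_get(mydev->clk);
 *	if (ret)
 *		return ret;
 *	ret = clk_set_rate(mydev->clk, 100000000);
 *	... work that relies on the rate staying fixed ...
 *	clk_rate_exclusive_put(mydev->clk);
 */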
static void clk_core_unprepare(struct clk_core *core)
	lockdep_assert_held(&prepare_lock);

	if (WARN(core->prepare_count == 0,
		 "%s already unprepared\n", core->name))

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Unpreparing critical %s\n", core->name))

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);

static void clk_core_unprepare_lock(struct clk_core *core)
	clk_core_unprepare(core);
	clk_prepare_unlock();
/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
	if (IS_ERR_OR_NULL(clk))

	clk_core_unprepare_lock(clk->core);
EXPORT_SYMBOL_GPL(clk_unprepare);
static int clk_core_prepare(struct clk_core *core)
	lockdep_assert_held(&prepare_lock);

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);

		ret = clk_core_prepare(core->parent);

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection.
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	clk_core_unprepare(core->parent);

	clk_pm_runtime_put(core);

static int clk_core_prepare_lock(struct clk_core *core)
	ret = clk_core_prepare(core);
	clk_prepare_unlock();
/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
	return clk_core_prepare_lock(clk->core);
EXPORT_SYMBOL_GPL(clk_prepare);
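
/*
 * Sketch of the intended calling order (assumed consumer code, not from this
 * file): prepare from sleepable context first, then enable, which may happen
 * in atomic context.
 *
 *	ret = clk_prepare(clk);		(may sleep, e.g. an I2C access)
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		(must never sleep)
 *	if (ret)
 *		clk_unprepare(clk);
 *
 * Teardown runs in the reverse order: clk_disable(), then clk_unprepare().
 */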
static void clk_core_disable(struct clk_core *core)
	lockdep_assert_held(&enable_lock);

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Disabling critical %s\n", core->name))

	if (--core->enable_count > 0)

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);

static void clk_core_disable_lock(struct clk_core *core)
	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
	if (IS_ERR_OR_NULL(clk))

	clk_core_disable_lock(clk->core);
EXPORT_SYMBOL_GPL(clk_disable);
static int clk_core_enable(struct clk_core *core)
	lockdep_assert_held(&enable_lock);

	if (WARN(core->prepare_count == 0,
		 "Enabling unprepared %s\n", core->name))

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

			clk_core_disable(core->parent);

	core->enable_count++;

static int clk_core_enable_lock(struct clk_core *core)
	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);
/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of the clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and, based on the enable_count,
 * the clock either needs to be enabled or disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
EXPORT_SYMBOL_GPL(clk_gate_restore_context);
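
/*
 * A gate provider whose state lives in volatile registers can (sketch with a
 * hypothetical ops table) reuse this helper as its .restore_context hook:
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable		 = my_gate_enable,
 *		.disable	 = my_gate_disable,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */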
static int clk_core_save_context(struct clk_core *core)
	struct clk_core *child;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

static void clk_core_restore_context(struct clk_core *core)
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
	struct clk_core *clk;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);

EXPORT_SYMBOL_GPL(clk_save_context);
/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 */
void clk_restore_context(void)
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
EXPORT_SYMBOL_GPL(clk_restore_context);
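
/*
 * Assumed usage from platform PM code (illustrative only; the callback names
 * are made up): save on the way into a register-losing power state, restore
 * on the way out.
 *
 *	static int my_platform_suspend_noirq(struct device *dev)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static int my_platform_resume_noirq(struct device *dev)
 *	{
 *		clk_restore_context();
 *		return 0;
 *	}
 */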
/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, a negative errno
 * otherwise.
 */
int clk_enable(struct clk *clk)
	return clk_core_enable_lock(clk->core);
EXPORT_SYMBOL_GPL(clk_enable);
static int clk_core_prepare_enable(struct clk_core *core)
	ret = clk_core_prepare_lock(core);

	ret = clk_core_enable_lock(core);
		clk_core_unprepare_lock(core);

static void clk_core_disable_unprepare(struct clk_core *core)
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);

static void clk_unprepare_unused_subtree(struct clk_core *core)
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)

	if (core->flags & CLK_IGNORE_UNUSED)

	if (clk_pm_runtime_get(core))

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);

	clk_pm_runtime_put(core);

static void clk_disable_unused_subtree(struct clk_core *core)
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))

	flags = clk_enable_lock();

	if (core->enable_count)

	if (core->flags & CLK_IGNORE_UNUSED)
	/*
	 * Some gate clocks have special needs during the disable-unused
	 * sequence. Call .disable_unused if available; otherwise fall
	 * back to .disable.
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);

	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
	clk_ignore_unused = true;
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();
late_initcall_sync(clk_disable_unused);
static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
	lockdep_assert_held(&prepare_lock);

	/*
	 * At this point, core protection will be disabled if
	 * - the provider is not protected at all, or
	 * - the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
	struct clk_core *parent;

	if (WARN_ON(!core || !req))

	parent = core->parent;
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;

static bool clk_core_can_round(struct clk_core * const core)
	if (core->ops->determine_rate || core->ops->round_rate)

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
	lockdep_assert_held(&prepare_lock);

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
	return clk_core_round_rate_nolock(hw->core, req);
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);

	ret = clk_core_round_rate_nolock(hw->core, &req);

EXPORT_SYMBOL_GPL(clk_hw_round_rate);
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned. If the clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
	struct clk_rate_request req;

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();
EXPORT_SYMBOL_GPL(clk_round_rate);
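
/*
 * Common consumer pattern (a sketch; target_hz and tolerance_hz are
 * placeholders): query the achievable rate first, then decide whether it is
 * close enough before committing with clk_set_rate().
 *
 *	long rounded = clk_round_rate(clk, target_hz);
 *	if (rounded < 0)
 *		return rounded;
 *	if (abs(rounded - (long)target_hz) <= tolerance_hz)
 *		ret = clk_set_rate(clk, rounded);
 */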
/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
			if (ret & NOTIFY_STOP_MASK)
/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);

static long clk_core_get_accuracy(struct clk_core *core)
	unsigned long accuracy;

	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
	return clk_core_get_accuracy(clk->core);
EXPORT_SYMBOL_GPL(clk_get_accuracy);
static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);

static unsigned long clk_core_get_rate(struct clk_core *core)
	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
	return clk_core_get_rate(clk->core);
EXPORT_SYMBOL_GPL(clk_get_rate);
static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
	for (i = 0; i < core->num_parents; i++) {
		if (core->parents[i] == parent)

		if (core->parents[i])

		/* Fall back to comparing globally unique names */
		if (!strcmp(parent->name, core->parent_names[i])) {
			core->parents[i] = parent;

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
		hlist_add_head(&core->child_node, &clk_orphan_list);
			clk_core_update_orphan_status(core, true);

	core->parent = new_parent;
static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
	unsigned long flags;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

	__clk_set_parent_after(core, parent, old_parent);
/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
/*
 * Calculate the new rates, returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;

	if (IS_ERR_OR_NULL(core))

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

	clk_calc_subtree(core, new_rate, parent, p_index);
/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * propagate the error.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
		tmp_clk = clk_propagate_rate_change(child, event);

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
/*
 * Walk down a subtree and set the new rates, notifying the rate
 * change on the way.
 */
static void clk_change_rate(struct clk_core *core)
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;

	if (clk_pm_runtime_get(core))

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
		clk_change_rate(child);

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						    unsigned long req_rate)
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

	return ret ? 0 : req.rate;

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
	struct clk_core *top, *fail_clk;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))

	/* fail on a direct rate set of a protected provider */
	if (clk_core_rate_is_protected(core))

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);

	ret = clk_pm_runtime_get(core);

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
		pr_debug("%s: failed to set %s rate\n", __func__,
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;

	clk_pm_runtime_put(core);
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored. If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate. Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
	/* prevent racing with updates to the clock topology */

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_rate_nolock(clk->core, rate);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();
EXPORT_SYMBOL_GPL(clk_set_rate);
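
/*
 * Minimal consumer sketch (the 48 MHz figure is a placeholder): request a
 * rate, then read back what was actually achieved, since rounding may apply.
 *
 *	ret = clk_set_rate(uart->clk, 48000000);
 *	if (ret)
 *		return ret;
 *	actual = clk_get_rate(uart->clk);
 */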
/**
 * clk_set_rate_exclusive - specify a new rate and get exclusive control
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 * within a critical section.
 *
 * This can be used initially to ensure that at least one consumer is
 * satisfied when several consumers are competing for exclusivity over the
 * same clock provider.
 *
 * The exclusivity is not applied if setting the rate failed.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put().
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
	/* prevent racing with updates to the clock topology */

	/*
	 * The temporary protection removal is deliberately not done here:
	 * this function is meant to be used instead of clk_rate_protect,
	 * so the clock provider is protected before the consumer code path.
	 */
	ret = clk_core_set_rate_nolock(clk->core, rate);
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;

	clk_prepare_unlock();
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
	unsigned long old_min, old_max, rate;

		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to roll back the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * We are in a bit of trouble here: the current rate is outside
		 * the requested range. We are going to try to request an
		 * appropriate range boundary, but there is a catch. It may
		 * fail for the usual reason (clock broken, clock protected,
		 * etc) but also because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */
		ret = clk_core_set_rate_nolock(clk->core, rate);
			/* roll back the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();
EXPORT_SYMBOL_GPL(clk_set_rate_range);
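
/*
 * Consumer sketch (made-up bounds): constrain a clock shared with other
 * users instead of forcing one exact rate; the framework aggregates the
 * ranges of all consumers when picking a rate.
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *
 * clk_set_min_rate() and clk_set_max_rate() below are thin wrappers that
 * adjust one bound at a time.
 */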
/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
	return clk_set_rate_range(clk, rate, clk->max_rate);
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
	return clk_set_rate_range(clk, clk->min_rate, rate);
EXPORT_SYMBOL_GPL(clk_set_max_rate);
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent. Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
	/* TODO: Create a per-user clk and change callers to call clk_put */
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
	clk_prepare_unlock();

EXPORT_SYMBOL_GPL(clk_get_parent);

static struct clk_core *__clk_init_parent(struct clk_core *core)
	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
	struct clk_core *core, *parent_core;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)

	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)

	return match_string(core->parent_names, core->num_parents,
			    parent_core->name) >= 0;
EXPORT_SYMBOL_GPL(clk_has_parent);
static int clk_core_set_parent_nolock(struct clk_core *core,
				      struct clk_core *parent)
	unsigned long p_rate = 0;

	lockdep_assert_held(&prepare_lock);

	if (core->parent == parent)

	/* verify ops for multi-parent clks */
	if (core->num_parents > 1 && !core->ops->set_parent)

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)

	if (clk_core_rate_is_protected(core))

	/* try finding the new parent index */
		p_index = clk_fetch_parent_index(core, parent);
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
		p_rate = parent->rate;

	ret = clk_pm_runtime_get(core);

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);

	clk_pm_runtime_put(core);
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source. If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, or the reparenting is glitchy in hardware), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent, clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_parent_nolock(clk->core,
					 parent ? parent->core : NULL);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();
EXPORT_SYMBOL_GPL(clk_set_parent);
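
/*
 * Consumer sketch (the "xtal" con_id is hypothetical): switch a mux to a
 * different input, ideally while the clock is unprepared if the provider
 * sets CLK_SET_PARENT_GATE.
 *
 *	struct clk *xtal = devm_clk_get(dev, "xtal");
 *	if (!IS_ERR(xtal) && clk_has_parent(clk, xtal))
 *		ret = clk_set_parent(clk, xtal);
 */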
static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
	lockdep_assert_held(&prepare_lock);

	if (clk_core_rate_is_protected(core))

	trace_clk_set_phase(core, degrees);

	if (core->ops->set_phase) {
		ret = core->ops->set_phase(core->hw, degrees);
			core->phase = degrees;

	trace_clk_set_phase_complete(core, degrees);
/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, a negative errno otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example,
 * with phase-locked-loop clock signal generators we may shift phase with
 * respect to the feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally, the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
	/* sanity check degrees */

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_phase_nolock(clk->core, degrees);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();
EXPORT_SYMBOL_GPL(clk_set_phase);
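
/*
 * Consumer sketch (90 degrees is an arbitrary example): shift the signal a
 * quarter period, then confirm via the cached readback.
 *
 *	ret = clk_set_phase(clk, 90);
 *	if (!ret)
 *		deg = clk_get_phase(clk);
 */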
static int clk_core_get_phase(struct clk_core *core)
	/* Always try to update the cached phase if possible */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);

	clk_prepare_unlock();

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk)
	return clk_core_get_phase(clk->core);
EXPORT_SYMBOL_GPL(clk_get_phase);
static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
	/* Assume a default value of 50% */

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);

static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
	struct clk_duty *duty = &core->duty;

	if (!core->ops->get_duty_cycle)
		return clk_core_update_duty_cycle_parent_nolock(core);

	ret = core->ops->get_duty_cycle(core->hw, duty);

	/* Don't trust the clock provider too much */
	if (duty->den == 0 || duty->num > duty->den) {

	clk_core_reset_duty_cycle_nolock(core);

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
	    core->flags & CLK_DUTY_CYCLE_PARENT) {
		ret = clk_core_update_duty_cycle_nolock(core->parent);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
		clk_core_reset_duty_cycle_nolock(core);

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty);

static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
					  struct clk_duty *duty)
	lockdep_assert_held(&prepare_lock);

	if (clk_core_rate_is_protected(core))

	trace_clk_set_duty_cycle(core, duty);

	if (!core->ops->set_duty_cycle)
		return clk_core_set_duty_cycle_parent_nolock(core, duty);

	ret = core->ops->set_duty_cycle(core->hw, duty);
		memcpy(&core->duty, duty, sizeof(*duty));

	trace_clk_set_duty_cycle_complete(core, duty);

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty)
	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2609 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2610 * @clk: clock signal source
2611 * @num: numerator of the duty cycle ratio to be applied
2612 * @den: denominator of the duty cycle ratio to be applied
2614 * Apply the duty cycle ratio if the ratio is valid and the clock can
2615 * perform this operation.
2617 * Returns 0 on success, a negative errno otherwise.
2619 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2622 struct clk_duty duty;
2627 /* sanity check the ratio */
2628 if (den == 0 || num > den)
2636 if (clk->exclusive_count)
2637 clk_core_rate_unprotect(clk->core);
2639 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2641 if (clk->exclusive_count)
2642 clk_core_rate_protect(clk->core);
2644 clk_prepare_unlock();
2648 EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
2650 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2653 struct clk_duty *duty = &core->duty;
2658 ret = clk_core_update_duty_cycle_nolock(core);
2660 ret = mult_frac(scale, duty->num, duty->den);
2662 clk_prepare_unlock();
2668 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2669 * @clk: clock signal source
2670 * @scale: scaling factor to be applied to represent the ratio as an integer
2672 * Returns the duty cycle ratio of a clock node multiplied by the provided
2673 * scaling factor, or negative errno on error.
2675 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2680 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2682 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
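/*
 * Example (editor's illustration, not part of clk.c): programming a 1/3 duty
 * cycle and reading it back with the same 100000 scale the debugfs code below
 * uses. "pwm" is a hypothetical connection id.
 */
static int example_one_third_duty(struct device *dev)
{
	struct clk *pwm = devm_clk_get(dev, "pwm");
	int ret;

	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	ret = clk_set_duty_cycle(pwm, 1, 3);	/* num = 1, den = 3 */
	if (ret)
		return ret;

	/* ~33333 for a 1/3 ratio at scale = 100000 */
	return clk_get_scaled_duty_cycle(pwm, 100000);
}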
2685 * clk_is_match - check if two clk's point to the same hardware clock
2686 * @p: clk compared against q
2687 * @q: clk compared against p
2689 * Returns true if the two struct clk pointers both point to the same hardware
2690 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2691 * share the same struct clk_core object.
2693 * Returns false otherwise. Note that two NULL clks are treated as matching.
2695 bool clk_is_match(const struct clk *p, const struct clk *q)
2697 /* trivial case: identical struct clk's or both NULL */
2701 /* true if clk->core pointers match. Avoid dereferencing garbage */
2702 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2703 if (p->core == q->core)
2708 EXPORT_SYMBOL_GPL(clk_is_match);
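/*
 * Example (editor's illustration, not part of clk.c): two handles obtained
 * independently are distinct struct clk pointers, so compare them with
 * clk_is_match() rather than ==. The "bus" connection id is hypothetical;
 * error pointers are tolerated by clk_is_match() itself.
 */
static bool example_same_hw_clock(struct device *dev)
{
	struct clk *a = devm_clk_get(dev, "bus");
	struct clk *b = devm_clk_get(dev, "bus");

	/* true if both handles resolved to the same clk_core */
	return clk_is_match(a, b);
}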
2710 /*** debugfs support ***/
2712 #ifdef CONFIG_DEBUG_FS
2713 #include <linux/debugfs.h>
2715 static struct dentry *rootdir;
2716 static int inited = 0;
2717 static DEFINE_MUTEX(clk_debug_lock);
2718 static HLIST_HEAD(clk_debug_list);
2720 static struct hlist_head *all_lists[] = {
2726 static struct hlist_head *orphan_list[] = {
2731 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2737 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
2739 30 - level * 3, c->name,
2740 c->enable_count, c->prepare_count, c->protect_count,
2741 clk_core_get_rate(c), clk_core_get_accuracy(c),
2742 clk_core_get_phase(c),
2743 clk_core_get_scaled_duty_cycle(c, 100000));
2746 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2749 struct clk_core *child;
2754 clk_summary_show_one(s, c, level);
2756 hlist_for_each_entry(child, &c->children, child_node)
2757 clk_summary_show_subtree(s, child, level + 1);
2760 static int clk_summary_show(struct seq_file *s, void *data)
2763 struct hlist_head **lists = (struct hlist_head **)s->private;
2765 seq_puts(s, "                                 enable  prepare  protect                                duty\n");
2766 seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle\n");
2767 seq_puts(s, "---------------------------------------------------------------------------------------------\n");
2771 for (; *lists; lists++)
2772 hlist_for_each_entry(c, *lists, child_node)
2773 clk_summary_show_subtree(s, c, 0);
2775 clk_prepare_unlock();
2779 DEFINE_SHOW_ATTRIBUTE(clk_summary);
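/*
 * For reference (editor's note), /sys/kernel/debug/clk/clk_summary rendered
 * by the code above looks roughly like this; clock names and values are
 * illustrative only:
 *
 *                                  enable  prepare  protect                                duty
 *    clock                          count    count    count        rate   accuracy phase  cycle
 *  ---------------------------------------------------------------------------------------------
 *    osc24m                             2        2        0    24000000          0     0  50000
 *       pll1                            1        1        0   792000000          0     0  50000
 */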
2781 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2786 /* This should be JSON format, i.e. elements separated with a comma */
2787 seq_printf(s, "\"%s\": { ", c->name);
2788 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2789 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2790 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
2791 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2792 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2793 seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
2794 seq_printf(s, "\"duty_cycle\": %u",
2795 clk_core_get_scaled_duty_cycle(c, 100000));
2798 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2800 struct clk_core *child;
2805 clk_dump_one(s, c, level);
2807 hlist_for_each_entry(child, &c->children, child_node) {
2809 clk_dump_subtree(s, child, level + 1);
2815 static int clk_dump_show(struct seq_file *s, void *data)
2818 bool first_node = true;
2819 struct hlist_head **lists = (struct hlist_head **)s->private;
2824 for (; *lists; lists++) {
2825 hlist_for_each_entry(c, *lists, child_node) {
2829 clk_dump_subtree(s, c, 0);
2833 clk_prepare_unlock();
2838 DEFINE_SHOW_ATTRIBUTE(clk_dump);
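/*
 * For reference (editor's note), clk_dump emits one JSON object per clock,
 * nested by topology, matching the seq_printf() calls above. Illustrative
 * shape only:
 *
 *   "osc24m": { "enable_count": 2, "prepare_count": 2, "protect_count": 0,
 *               "rate": 24000000, "accuracy": 0, "phase": 0,
 *               "duty_cycle": 50000, "pll1": { ... } }
 */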
2840 static const struct {
2844 #define ENTRY(f) { f, #f }
2845 ENTRY(CLK_SET_RATE_GATE),
2846 ENTRY(CLK_SET_PARENT_GATE),
2847 ENTRY(CLK_SET_RATE_PARENT),
2848 ENTRY(CLK_IGNORE_UNUSED),
2849 ENTRY(CLK_IS_BASIC),
2850 ENTRY(CLK_GET_RATE_NOCACHE),
2851 ENTRY(CLK_SET_RATE_NO_REPARENT),
2852 ENTRY(CLK_GET_ACCURACY_NOCACHE),
2853 ENTRY(CLK_RECALC_NEW_RATES),
2854 ENTRY(CLK_SET_RATE_UNGATE),
2855 ENTRY(CLK_IS_CRITICAL),
2856 ENTRY(CLK_OPS_PARENT_ENABLE),
2857 ENTRY(CLK_DUTY_CYCLE_PARENT),
2861 static int clk_flags_show(struct seq_file *s, void *data)
2863 struct clk_core *core = s->private;
2864 unsigned long flags = core->flags;
2867 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
2868 if (flags & clk_flags[i].flag) {
2869 seq_printf(s, "%s\n", clk_flags[i].name);
2870 flags &= ~clk_flags[i].flag;
2875 seq_printf(s, "0x%lx\n", flags);
2880 DEFINE_SHOW_ATTRIBUTE(clk_flags);
2882 static int possible_parents_show(struct seq_file *s, void *data)
2884 struct clk_core *core = s->private;
2887 for (i = 0; i < core->num_parents - 1; i++)
2888 seq_printf(s, "%s ", core->parent_names[i]);
2890 seq_printf(s, "%s\n", core->parent_names[i]);
2894 DEFINE_SHOW_ATTRIBUTE(possible_parents);
2896 static int clk_duty_cycle_show(struct seq_file *s, void *data)
2898 struct clk_core *core = s->private;
2899 struct clk_duty *duty = &core->duty;
2901 seq_printf(s, "%u/%u\n", duty->num, duty->den);
2905 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
2907 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
2909 struct dentry *root;
2911 if (!core || !pdentry)
2914 root = debugfs_create_dir(core->name, pdentry);
2915 core->dentry = root;
2917 debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
2918 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
2919 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
2920 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
2921 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
2922 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
2923 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
2924 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
2925 debugfs_create_file("clk_duty_cycle", 0444, root, core,
2926 &clk_duty_cycle_fops);
2928 if (core->num_parents > 1)
2929 debugfs_create_file("clk_possible_parents", 0444, root, core,
2930 &possible_parents_fops);
2932 if (core->ops->debug_init)
2933 core->ops->debug_init(core->hw, core->dentry);
2937 * clk_debug_register - add a clk node to the debugfs clk directory
2938 * @core: the clk being added to the debugfs clk directory
2940 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2941 * initialized. Otherwise it bails out early since the debugfs clk directory
2942 * will be created lazily by clk_debug_init as part of a late_initcall.
2944 static void clk_debug_register(struct clk_core *core)
2946 mutex_lock(&clk_debug_lock);
2947 hlist_add_head(&core->debug_node, &clk_debug_list);
2949 clk_debug_create_one(core, rootdir);
2950 mutex_unlock(&clk_debug_lock);
2954 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2955 * @core: the clk being removed from the debugfs clk directory
2957 * Dynamically removes a clk and all its child nodes from the
2958 * debugfs clk directory if clk->dentry points to debugfs created by
2959 * clk_debug_register in __clk_core_init.
2961 static void clk_debug_unregister(struct clk_core *core)
2963 mutex_lock(&clk_debug_lock);
2964 hlist_del_init(&core->debug_node);
2965 debugfs_remove_recursive(core->dentry);
2966 core->dentry = NULL;
2967 mutex_unlock(&clk_debug_lock);
2971 * clk_debug_init - lazily populate the debugfs clk directory
2973 * clks are often initialized very early during boot before memory can be
2974 * dynamically allocated and well before debugfs is set up. This function
2975 * populates the debugfs clk directory once at boot-time when we know that
2976 * debugfs is set up. It should only be called once at boot-time; all other
2977 * clks added dynamically will be registered via clk_debug_register.
2979 static int __init clk_debug_init(void)
2981 struct clk_core *core;
2983 rootdir = debugfs_create_dir("clk", NULL);
2985 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
2987 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
2989 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
2991 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
2994 mutex_lock(&clk_debug_lock);
2995 hlist_for_each_entry(core, &clk_debug_list, debug_node)
2996 clk_debug_create_one(core, rootdir);
2999 mutex_unlock(&clk_debug_lock);
3003 late_initcall(clk_debug_init);
3005 static inline void clk_debug_register(struct clk_core *core) { }
3006 static inline void clk_debug_reparent(struct clk_core *core,
3007 struct clk_core *new_parent)
3010 static inline void clk_debug_unregister(struct clk_core *core)
3016 * __clk_core_init - initialize the data structures in a struct clk_core
3017 * @core: clk_core being initialized
3019 * Initializes the lists in struct clk_core, queries the hardware for the
3020 * parent and rate and sets them both.
3022 static int __clk_core_init(struct clk_core *core)
3025 struct clk_core *orphan;
3026 struct hlist_node *tmp2;
3034 ret = clk_pm_runtime_get(core);
3038 /* check to see if a clock with this name is already registered */
3039 if (clk_core_lookup(core->name)) {
3040 pr_debug("%s: clk %s already initialized\n",
3041 __func__, core->name);
3046 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
3047 if (core->ops->set_rate &&
3048 !((core->ops->round_rate || core->ops->determine_rate) &&
3049 core->ops->recalc_rate)) {
3050 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3051 __func__, core->name);
3056 if (core->ops->set_parent && !core->ops->get_parent) {
3057 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3058 __func__, core->name);
3063 if (core->num_parents > 1 && !core->ops->get_parent) {
3064 pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
3065 __func__, core->name);
3070 if (core->ops->set_rate_and_parent &&
3071 !(core->ops->set_parent && core->ops->set_rate)) {
3072 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3073 __func__, core->name);
3078 /* throw a WARN if any entries in parent_names are NULL */
3079 for (i = 0; i < core->num_parents; i++)
3080 WARN(!core->parent_names[i],
3081 "%s: invalid NULL in %s's .parent_names\n",
3082 __func__, core->name);
3084 core->parent = __clk_init_parent(core);
3087 * Populate core->parent if the parent has already been clk_core_init'd. If
3088 * the parent has not yet been clk_core_init'd then place clk in the orphan
3089 * list. If clk doesn't have any parents then place it in the root clk list.
3092 * Every time a new clk is clk_init'd we walk the list of orphan clocks
3093 * and re-parent any that are children of the clock currently being init'd.
3097 hlist_add_head(&core->child_node,
3098 &core->parent->children);
3099 core->orphan = core->parent->orphan;
3100 } else if (!core->num_parents) {
3101 hlist_add_head(&core->child_node, &clk_root_list);
3102 core->orphan = false;
3104 hlist_add_head(&core->child_node, &clk_orphan_list);
3105 core->orphan = true;
3109 * optional platform-specific magic
3111 * The .init callback is not used by any of the basic clock types, but
3112 * exists for weird hardware that must perform initialization magic.
3113 * Please consider other ways of solving initialization problems before
3114 * using this callback, as its use is discouraged.
3116 if (core->ops->init)
3117 core->ops->init(core->hw);
3120 * Set clk's accuracy. The preferred method is to use
3121 * .recalc_accuracy. For simple clocks and lazy developers the default
3122 * fallback is to use the parent's accuracy. If a clock doesn't have a
3123 * parent (or is orphaned) then its accuracy is set to zero (a perfect clock).
3126 if (core->ops->recalc_accuracy)
3127 core->accuracy = core->ops->recalc_accuracy(core->hw,
3128 __clk_get_accuracy(core->parent));
3129 else if (core->parent)
3130 core->accuracy = core->parent->accuracy;
3136 * Set clk's phase. Since a phase is by definition relative to its parent,
3137 * just query the current clock phase, or else assume the clock is in phase.
3139 if (core->ops->get_phase)
3140 core->phase = core->ops->get_phase(core->hw);
3145 * Set clk's duty cycle.
3147 clk_core_update_duty_cycle_nolock(core);
3150 * Set clk's rate. The preferred method is to use .recalc_rate. For
3151 * simple clocks and lazy developers the default fallback is to use the
3152 * parent's rate. If a clock doesn't have a parent (or is orphaned)
3153 * then rate is set to zero.
3155 if (core->ops->recalc_rate)
3156 rate = core->ops->recalc_rate(core->hw,
3157 clk_core_get_rate_nolock(core->parent));
3158 else if (core->parent)
3159 rate = core->parent->rate;
3162 core->rate = core->req_rate = rate;
3165 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3166 * don't get accidentally disabled when walking the orphan tree and
3167 * reparenting clocks
3169 if (core->flags & CLK_IS_CRITICAL) {
3170 unsigned long flags;
3172 clk_core_prepare(core);
3174 flags = clk_enable_lock();
3175 clk_core_enable(core);
3176 clk_enable_unlock(flags);
3180 * walk the list of orphan clocks and reparent any that have newly found a parent.
3183 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3184 struct clk_core *parent = __clk_init_parent(orphan);
3187 * We need to use __clk_set_parent_before() and _after() to
3188 * properly migrate any prepare/enable count of the orphan
3189 * clock. This is important for CLK_IS_CRITICAL clocks, which
3190 * are enabled during init but might not have a parent yet.
3193 /* update the clk tree topology */
3194 __clk_set_parent_before(orphan, parent);
3195 __clk_set_parent_after(orphan, parent, NULL);
3196 __clk_recalc_accuracies(orphan);
3197 __clk_recalc_rates(orphan, 0);
3201 kref_init(&core->ref);
3203 clk_pm_runtime_put(core);
3205 clk_prepare_unlock();
3208 clk_debug_register(core);
3214 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3215 * @core: clk to add consumer to
3216 * @clk: consumer to link to a clk
3218 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3221 hlist_add_head(&clk->clks_node, &core->clks);
3222 clk_prepare_unlock();
3226 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3227 * @clk: consumer to unlink
3229 static void clk_core_unlink_consumer(struct clk *clk)
3231 lockdep_assert_held(&prepare_lock);
3232 hlist_del(&clk->clks_node);
3236 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3237 * @core: clk to allocate a consumer for
3238 * @dev_id: string describing device name
3239 * @con_id: connection ID string on device
3241 * Returns: clk consumer left unlinked from the consumer list
3243 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3248 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3250 return ERR_PTR(-ENOMEM);
3253 clk->dev_id = dev_id;
3254 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3255 clk->max_rate = ULONG_MAX;
3261 * free_clk - Free a clk consumer
3262 * @clk: clk consumer to free
3264 * Note, this assumes the clk has been unlinked from the clk_core consumer list.
3267 static void free_clk(struct clk *clk)
3269 kfree_const(clk->con_id);
3274 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given a clk_hw
3276 * @hw: clk_hw associated with the clk being consumed
3277 * @dev_id: string describing device name
3278 * @con_id: connection ID string on device
3280 * This is the main function used to create a clk pointer for use by clk
3281 * consumers. It connects a consumer to the clk_core and clk_hw structures
3282 * used by the framework and clk provider respectively.
3284 struct clk *clk_hw_create_clk(struct clk_hw *hw,
3285 const char *dev_id, const char *con_id)
3288 struct clk_core *core;
3290 /* This is to allow this function to be chained to others */
3291 if (IS_ERR_OR_NULL(hw))
3292 return ERR_CAST(hw);
3295 clk = alloc_clk(core, dev_id, con_id);
3299 if (!try_module_get(core->owner)) {
3301 return ERR_PTR(-ENOENT);
3304 kref_get(&core->ref);
3305 clk_core_link_consumer(core, clk);
3311 * clk_register - allocate a new clock, register it and return an opaque cookie
3312 * @dev: device that is registering this clock
3313 * @hw: link to hardware-specific clock data
3315 * clk_register is the primary interface for populating the clock tree with new
3316 * clock nodes. It returns a pointer to the newly allocated struct clk which
3317 * cannot be dereferenced by driver code but may be used in conjunction with the
3318 * rest of the clock API. In the event of an error clk_register will return an
3319 * error code; drivers must test for an error code after calling clk_register.
3321 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
3324 struct clk_core *core;
3326 core = kzalloc(sizeof(*core), GFP_KERNEL);
3332 core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
3338 if (WARN_ON(!hw->init->ops)) {
3342 core->ops = hw->init->ops;
3344 if (dev && pm_runtime_enabled(dev))
3345 core->rpm_enabled = true;
3347 if (dev && dev->driver)
3348 core->owner = dev->driver->owner;
3350 core->flags = hw->init->flags;
3351 core->num_parents = hw->init->num_parents;
3353 core->max_rate = ULONG_MAX;
3356 /* allocate local copy in case parent_names is __initdata */
3357 core->parent_names = kcalloc(core->num_parents, sizeof(char *),
3360 if (!core->parent_names) {
3362 goto fail_parent_names;
3366 /* copy each string name in case parent_names is __initdata */
3367 for (i = 0; i < core->num_parents; i++) {
3368 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
3370 if (!core->parent_names[i]) {
3372 goto fail_parent_names_copy;
3376 /* avoid unnecessary string look-ups of clk_core's possible parents. */
3377 core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
3379 if (!core->parents) {
3384 INIT_HLIST_HEAD(&core->clks);
3387 * Don't call clk_hw_create_clk() here because that would pin the
3388 * provider module to itself and prevent it from ever being removed.
3390 hw->clk = alloc_clk(core, NULL, NULL);
3391 if (IS_ERR(hw->clk)) {
3392 ret = PTR_ERR(hw->clk);
3396 clk_core_link_consumer(hw->core, hw->clk);
3398 ret = __clk_core_init(core);
3403 clk_core_unlink_consumer(hw->clk);
3404 clk_prepare_unlock();
3410 kfree(core->parents);
3411 fail_parent_names_copy:
3413 kfree_const(core->parent_names[i]);
3414 kfree(core->parent_names);
3417 kfree_const(core->name);
3421 return ERR_PTR(ret);
3423 EXPORT_SYMBOL_GPL(clk_register);
3426 * clk_hw_register - register a clk_hw and return an error code
3427 * @dev: device that is registering this clock
3428 * @hw: link to hardware-specific clock data
3430 * clk_hw_register is the primary interface for populating the clock tree with
3431 * new clock nodes. It returns an integer equal to zero indicating success or
3432 * less than zero indicating failure. Drivers must test for an error code after
3433 * calling clk_hw_register().
3435 int clk_hw_register(struct device *dev, struct clk_hw *hw)
3437 return PTR_ERR_OR_ZERO(clk_register(dev, hw));
3439 EXPORT_SYMBOL_GPL(clk_hw_register);
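/*
 * Example (editor's illustration, not part of clk.c): the minimal
 * provider-side registration path. The ops, clock name and parent name are
 * hypothetical; clk_register() copies the name and parent_names strings, so
 * the init data may live on the stack.
 */
static const struct clk_ops example_gate_ops;	/* hypothetical, normally filled in */

static int example_register_gate(struct device *dev, struct clk_hw *hw)
{
	static const char * const parents[] = { "osc24m" };
	struct clk_init_data init = {
		.name = "example-gate",
		.ops = &example_gate_ops,
		.parent_names = parents,
		.num_parents = ARRAY_SIZE(parents),
	};

	hw->init = &init;	/* consumed during registration */
	return clk_hw_register(dev, hw);
}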
3441 /* Free memory allocated for a clock. */
3442 static void __clk_release(struct kref *ref)
3444 struct clk_core *core = container_of(ref, struct clk_core, ref);
3445 int i = core->num_parents;
3447 lockdep_assert_held(&prepare_lock);
3449 kfree(core->parents);
3451 kfree_const(core->parent_names[i]);
3453 kfree(core->parent_names);
3454 kfree_const(core->name);
3459 * Empty clk_ops for unregistered clocks. These are used temporarily
3460 * after clk_unregister() was called on a clock and until the last clock
3461 * consumer calls clk_put() and the struct clk object is freed.
3463 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
3468 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
3473 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
3474 unsigned long parent_rate)
3479 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
3484 static const struct clk_ops clk_nodrv_ops = {
3485 .enable = clk_nodrv_prepare_enable,
3486 .disable = clk_nodrv_disable_unprepare,
3487 .prepare = clk_nodrv_prepare_enable,
3488 .unprepare = clk_nodrv_disable_unprepare,
3489 .set_rate = clk_nodrv_set_rate,
3490 .set_parent = clk_nodrv_set_parent,
3494 * clk_unregister - unregister a currently registered clock
3495 * @clk: clock to unregister
3497 void clk_unregister(struct clk *clk)
3499 unsigned long flags;
3501 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3504 clk_debug_unregister(clk->core);
3508 if (clk->core->ops == &clk_nodrv_ops) {
3509 pr_err("%s: unregistered clock: %s\n", __func__,
3514 * Assign empty clock ops for consumers that might still hold
3515 * a reference to this clock.
3517 flags = clk_enable_lock();
3518 clk->core->ops = &clk_nodrv_ops;
3519 clk_enable_unlock(flags);
3521 if (!hlist_empty(&clk->core->children)) {
3522 struct clk_core *child;
3523 struct hlist_node *t;
3525 /* Reparent all children to the orphan list. */
3526 hlist_for_each_entry_safe(child, t, &clk->core->children,
3528 clk_core_set_parent_nolock(child, NULL);
3531 hlist_del_init(&clk->core->child_node);
3533 if (clk->core->prepare_count)
3534 pr_warn("%s: unregistering prepared clock: %s\n",
3535 __func__, clk->core->name);
3537 if (clk->core->protect_count)
3538 pr_warn("%s: unregistering protected clock: %s\n",
3539 __func__, clk->core->name);
3541 kref_put(&clk->core->ref, __clk_release);
3543 clk_prepare_unlock();
3545 EXPORT_SYMBOL_GPL(clk_unregister);
3548 * clk_hw_unregister - unregister a currently registered clk_hw
3549 * @hw: hardware-specific clock data to unregister
3551 void clk_hw_unregister(struct clk_hw *hw)
3553 clk_unregister(hw->clk);
3555 EXPORT_SYMBOL_GPL(clk_hw_unregister);
3557 static void devm_clk_release(struct device *dev, void *res)
3559 clk_unregister(*(struct clk **)res);
3562 static void devm_clk_hw_release(struct device *dev, void *res)
3564 clk_hw_unregister(*(struct clk_hw **)res);
3568 * devm_clk_register - resource managed clk_register()
3569 * @dev: device that is registering this clock
3570 * @hw: link to hardware-specific clock data
3572 * Managed clk_register(). Clocks returned from this function are
3573 * automatically clk_unregister()ed on driver detach. See clk_register() for more information.
3576 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
3581 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
3583 return ERR_PTR(-ENOMEM);
3585 clk = clk_register(dev, hw);
3588 devres_add(dev, clkp);
3595 EXPORT_SYMBOL_GPL(devm_clk_register);
3598 * devm_clk_hw_register - resource managed clk_hw_register()
3599 * @dev: device that is registering this clock
3600 * @hw: link to hardware-specific clock data
3602 * Managed clk_hw_register(). Clocks registered by this function are
3603 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
3604 * for more information.
3606 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
3608 struct clk_hw **hwp;
3611 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
3615 ret = clk_hw_register(dev, hw);
3618 devres_add(dev, hwp);
3625 EXPORT_SYMBOL_GPL(devm_clk_hw_register);
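/*
 * Example (editor's illustration, not part of clk.c): with the devm variant
 * the clk_hw lifetime follows the device, so probe() needs no explicit
 * unregister path. The private struct is hypothetical and hw.init is assumed
 * to be set up as in the clk_hw_register() example above.
 */
struct example_priv {
	struct clk_hw hw;
};

static int example_probe_register(struct device *dev, struct example_priv *priv)
{
	return devm_clk_hw_register(dev, &priv->hw);
}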
3627 static int devm_clk_match(struct device *dev, void *res, void *data)
3629 struct clk *c = res;
3635 static int devm_clk_hw_match(struct device *dev, void *res, void *data)
3637 struct clk_hw *hw = res;
3645 * devm_clk_unregister - resource managed clk_unregister()
3646 * @clk: clock to unregister
3648 * Deallocate a clock allocated with devm_clk_register(). Normally
3649 * this function will not need to be called and the resource management
3650 * code will ensure that the resource is freed.
3652 void devm_clk_unregister(struct device *dev, struct clk *clk)
3654 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
3656 EXPORT_SYMBOL_GPL(devm_clk_unregister);
3659 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
3660 * @dev: device that is unregistering the hardware-specific clock data
3661 * @hw: link to hardware-specific clock data
3663 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
3664 * this function will not need to be called and the resource management
3665 * code will ensure that the resource is freed.
3667 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
3669 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
3672 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
3678 void __clk_put(struct clk *clk)
3680 struct module *owner;
3682 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3688 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
3689 * given user should be balanced with calls to clk_rate_exclusive_put(),
3690 * made by that same consumer.
3692 if (WARN_ON(clk->exclusive_count)) {
3693 /* We voiced our concern, let's sanitize the situation */
3694 clk->core->protect_count -= (clk->exclusive_count - 1);
3695 clk_core_rate_unprotect(clk->core);
3696 clk->exclusive_count = 0;
3699 hlist_del(&clk->clks_node);
3700 if (clk->min_rate > clk->core->req_rate ||
3701 clk->max_rate < clk->core->req_rate)
3702 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
3704 owner = clk->core->owner;
3705 kref_put(&clk->core->ref, __clk_release);
3707 clk_prepare_unlock();
3714 /*** clk rate change notifiers ***/
3717 * clk_notifier_register - add a clk rate change notifier
3718 * @clk: struct clk * to watch
3719 * @nb: struct notifier_block * with callback info
3721 * Request notification when clk's rate changes. This uses an SRCU
3722 * notifier because we want it to block and notifier unregistrations are
3723 * uncommon. The callbacks associated with the notifier must not
3724 * re-enter the clk framework by calling any top-level clk APIs;
3725 * doing so would nest the prepare_lock mutex.
3727 * In all notification cases (pre, post and abort rate change) the original
3728 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
3729 * and the new frequency is passed via struct clk_notifier_data.new_rate.
3731 * clk_notifier_register() must be called from non-atomic context.
3732 * Returns -EINVAL if called with null arguments, -ENOMEM upon
3733 * allocation failure; otherwise, passes along the return value of
3734 * srcu_notifier_chain_register().
3736 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
3738 struct clk_notifier *cn;
3746 /* search the list of notifiers for this clk */
3747 list_for_each_entry(cn, &clk_notifier_list, node)
3751 /* if clk wasn't in the notifier list, allocate new clk_notifier */
3752 if (cn->clk != clk) {
3753 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
3758 srcu_init_notifier_head(&cn->notifier_head);
3760 list_add(&cn->node, &clk_notifier_list);
3763 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
3765 clk->core->notifier_count++;
3768 clk_prepare_unlock();
3772 EXPORT_SYMBOL_GPL(clk_notifier_register);
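/*
 * Example (editor's illustration, not part of clk.c): a notifier callback
 * receiving the events described above; PRE/POST/ABORT_RATE_CHANGE and
 * struct clk_notifier_data come from linux/clk.h. Register it with
 * clk_notifier_register(clk, &my_nb), where my_nb.notifier_call points here.
 */
static int example_rate_notifier(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* veto or prepare for ndata->new_rate here */
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		/* rate moved from ndata->old_rate to ndata->new_rate */
		return NOTIFY_OK;
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_OK;
	}
}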
3775 * clk_notifier_unregister - remove a clk rate change notifier
3776 * @clk: struct clk *
3777 * @nb: struct notifier_block * with callback info
3779 * Requests no further notification for changes to 'clk' and frees memory
3780 * allocated in clk_notifier_register.
3782 * Returns -EINVAL if called with null arguments; otherwise, passes
3783 * along the return value of srcu_notifier_chain_unregister().
3785 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
3787 struct clk_notifier *cn = NULL;
3795 list_for_each_entry(cn, &clk_notifier_list, node)
3799 if (cn->clk == clk) {
3800 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
3802 clk->core->notifier_count--;
3804 /* XXX the notifier code should handle this better */
3805 if (!cn->notifier_head.head) {
3806 srcu_cleanup_notifier_head(&cn->notifier_head);
3807 list_del(&cn->node);
3815 clk_prepare_unlock();
3819 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
3823 * struct of_clk_provider - Clock provider registration structure
3824 * @link: Entry in global list of clock providers
3825 * @node: Pointer to device tree node of clock provider
3826 * @get: Get clock callback. Returns NULL or a struct clk for the
3827 * given clock specifier
3828 * @data: context pointer to be passed into @get callback
3830 struct of_clk_provider {
3831 struct list_head link;
3833 struct device_node *node;
3834 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
3835 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
3839 static const struct of_device_id __clk_of_table_sentinel
3840 __used __section(__clk_of_table_end);
3842 static LIST_HEAD(of_clk_providers);
3843 static DEFINE_MUTEX(of_clk_mutex);
3845 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
3850 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
3852 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
3856 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
3858 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
3860 struct clk_onecell_data *clk_data = data;
3861 unsigned int idx = clkspec->args[0];
3863 if (idx >= clk_data->clk_num) {
3864 pr_err("%s: invalid clock index %u\n", __func__, idx);
3865 return ERR_PTR(-EINVAL);
3868 return clk_data->clks[idx];
3870 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
3873 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
3875 struct clk_hw_onecell_data *hw_data = data;
3876 unsigned int idx = clkspec->args[0];
3878 if (idx >= hw_data->num) {
3879 pr_err("%s: invalid index %u\n", __func__, idx);
3880 return ERR_PTR(-EINVAL);
3883 return hw_data->hws[idx];
3885 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
3888 * of_clk_add_provider() - Register a clock provider for a node
3889 * @np: Device node pointer associated with clock provider
3890 * @clk_src_get: callback for decoding clock
3891 * @data: context pointer for @clk_src_get callback.
3893 int of_clk_add_provider(struct device_node *np,
3894 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
3898 struct of_clk_provider *cp;
3901 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
3905 cp->node = of_node_get(np);
3907 cp->get = clk_src_get;
3909 mutex_lock(&of_clk_mutex);
3910 list_add(&cp->link, &of_clk_providers);
3911 mutex_unlock(&of_clk_mutex);
3912 pr_debug("Added clock from %pOF\n", np);
3914 ret = of_clk_set_defaults(np, true);
3916 of_clk_del_provider(np);
3920 EXPORT_SYMBOL_GPL(of_clk_add_provider);
3923 * of_clk_add_hw_provider() - Register a clock provider for a node
3924 * @np: Device node pointer associated with clock provider
3925 * @get: callback for decoding clk_hw
3926 * @data: context pointer for @get callback.
3928 int of_clk_add_hw_provider(struct device_node *np,
3929 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
3933 struct of_clk_provider *cp;
3936 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
3940 cp->node = of_node_get(np);
3944 mutex_lock(&of_clk_mutex);
3945 list_add(&cp->link, &of_clk_providers);
3946 mutex_unlock(&of_clk_mutex);
3947 pr_debug("Added clk_hw provider from %pOF\n", np);
3949 ret = of_clk_set_defaults(np, true);
3951 of_clk_del_provider(np);
3955 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
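/*
 * Example (editor's illustration, not part of clk.c): exposing two clk_hws
 * through the generic onecell getter. The count and allocation strategy are
 * hypothetical; the clk_hw_onecell_data must outlive the provider
 * registration.
 */
static int example_add_onecell_provider(struct device_node *np,
					struct clk_hw *a, struct clk_hw *b)
{
	struct clk_hw_onecell_data *hw_data;

	hw_data = kzalloc(sizeof(*hw_data) + 2 * sizeof(hw_data->hws[0]),
			  GFP_KERNEL);
	if (!hw_data)
		return -ENOMEM;

	hw_data->num = 2;
	hw_data->hws[0] = a;
	hw_data->hws[1] = b;

	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
}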
3957 static void devm_of_clk_release_provider(struct device *dev, void *res)
3959 of_clk_del_provider(*(struct device_node **)res);
3963 * We allow a child device to use its parent device as the clock provider node
3964 * for cases like MFD sub-devices where the child device driver wants to use
3965 * devm_*() APIs but not list the device in DT as a sub-node.
3967 static struct device_node *get_clk_provider_node(struct device *dev)
3969 struct device_node *np, *parent_np;
3972 parent_np = dev->parent ? dev->parent->of_node : NULL;
3974 if (!of_find_property(np, "#clock-cells", NULL))
3975 if (of_find_property(parent_np, "#clock-cells", NULL))
3982 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
3983 * @dev: Device acting as the clock provider (used for DT node and lifetime)
3984 * @get: callback for decoding clk_hw
3985 * @data: context pointer for @get callback
3987 * Registers a clock provider for the given device's node. If the device has
3988 * no DT node, or if its node lacks clock provider information (#clock-cells),
3989 * then the parent device's node is scanned for this information. If the
3990 * parent node has #clock-cells then it is used for registration. The
3991 * provider is automatically released at device exit.
3993 * Return: 0 on success or an errno on failure.
3995 int devm_of_clk_add_hw_provider(struct device *dev,
3996 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4000 struct device_node **ptr, *np;
4003 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4008 np = get_clk_provider_node(dev);
4009 ret = of_clk_add_hw_provider(np, get, data);
4012 devres_add(dev, ptr);
4019 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
4022 * of_clk_del_provider() - Remove a previously registered clock provider
4023 * @np: Device node pointer associated with clock provider
4025 void of_clk_del_provider(struct device_node *np)
4027 struct of_clk_provider *cp;
4029 mutex_lock(&of_clk_mutex);
4030 list_for_each_entry(cp, &of_clk_providers, link) {
4031 if (cp->node == np) {
4032 list_del(&cp->link);
4033 of_node_put(cp->node);
4038 mutex_unlock(&of_clk_mutex);
4040 EXPORT_SYMBOL_GPL(of_clk_del_provider);
4042 static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4044 struct device_node **np = res;
4046 if (WARN_ON(!np || !*np))
4053 * devm_of_clk_del_provider() - Remove clock provider registered using devm
4054 * @dev: Device to whose lifetime the clock provider was bound
4056 void devm_of_clk_del_provider(struct device *dev)
4059 struct device_node *np = get_clk_provider_node(dev);
4061 ret = devres_release(dev, devm_of_clk_release_provider,
4062 devm_clk_provider_match, np);
4066 EXPORT_SYMBOL(devm_of_clk_del_provider);
4068 static struct clk_hw *
4069 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4070 struct of_phandle_args *clkspec)
4074 if (provider->get_hw)
4075 return provider->get_hw(clkspec, provider->data);
4077 clk = provider->get(clkspec, provider->data);
4079 return ERR_CAST(clk);
4080 return __clk_get_hw(clk);
4083 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
4084 const char *dev_id, const char *con_id)
4086 struct of_clk_provider *provider;
4087 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4090 return ERR_PTR(-EINVAL);
4092 /* Check if we have such a provider in our array */
4093 mutex_lock(&of_clk_mutex);
4094 list_for_each_entry(provider, &of_clk_providers, link) {
4095 if (provider->node == clkspec->np) {
4096 hw = __of_clk_get_hw_from_provider(provider, clkspec);
4101 mutex_unlock(&of_clk_mutex);
4103 return clk_hw_create_clk(hw, dev_id, con_id);
4107 * of_clk_get_from_provider() - Lookup a clock from a clock provider
4108 * @clkspec: pointer to a clock specifier data structure
4110 * This function looks up a struct clk from the registered list of clock
4111 * providers; the input is a clock specifier data structure as returned
4112 * from the of_parse_phandle_with_args() function call.
4114 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4116 return __of_clk_get_from_provider(clkspec, NULL, __func__);
4118 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4121 * of_clk_get_parent_count() - Count the number of clocks a device node has
4122 * @np: device node to count
4124 * Returns: The number of clocks that are possible parents of this node
4126 unsigned int of_clk_get_parent_count(struct device_node *np)
4130 count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
4136 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
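/*
 * For reference (editor's note), the count comes from the consumer's
 * "clocks" phandle list; with a hypothetical binding like
 *
 *	uart0: serial@1000 {
 *		clocks = <&pll 3>, <&osc>;
 *	};
 *
 * of_clk_get_parent_count() on that node returns 2.
 */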
4138 const char *of_clk_get_parent_name(struct device_node *np, int index)
4140 struct of_phandle_args clkspec;
4141 struct property *prop;
4142 const char *clk_name;
4149 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
4154 index = clkspec.args_count ? clkspec.args[0] : 0;
4157 /* if there is a "clock-indices" property, use it to map the index
4158 * specified onto an array offset for the "clock-output-names" property.
4160 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
4167 /* We went off the end of 'clock-indices' without finding it */
4171 if (of_property_read_string_index(clkspec.np, "clock-output-names",
4175 * Best effort to get the name if the clock has been
4176 * registered with the framework. If the clock isn't
4177 * registered, we return the node name as the name of
4178 * the clock as long as #clock-cells = 0.
4180 clk = of_clk_get_from_provider(&clkspec);
4182 if (clkspec.args_count == 0)
4183 clk_name = clkspec.np->name;
4187 clk_name = __clk_get_name(clk);
4193 of_node_put(clkspec.np);
4196 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
4199 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
4201 * @np: Device node pointer associated with clock provider
4202 * @parents: pointer to char array that hold the parents' names
4203 * @size: size of the @parents array
4205 * Return: number of parents for the clock node.
4207 int of_clk_parent_fill(struct device_node *np, const char **parents,
4212 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
4217 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
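/*
 * Example (editor's illustration, not part of clk.c): gathering parent names
 * ahead of registration. The bound of 4 entries is hypothetical.
 */
static int example_fill_parents(struct device_node *np)
{
	static const char *parents[4];

	/* returns how many of the 4 slots were filled */
	return of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
}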
4219 struct clock_provider {
4220 void (*clk_init_cb)(struct device_node *);
4221 struct device_node *np;
4222 struct list_head node;
4226 * This function looks for a parent clock. If there is one, then it
4227 * checks that the provider for this parent clock was initialized, in
4228 * which case the parent clock will be ready.
4230 static int parent_ready(struct device_node *np)
4235 struct clk *clk = of_clk_get(np, i);
4237 /* this parent is ready, we can check the next one */
4244 /* at least one parent is not ready, we exit now */
4245 if (PTR_ERR(clk) == -EPROBE_DEFER)
4249 * Here we assume that the device tree is
4250 * written correctly. So an error means that there
4251 * are no more parents. As we didn't exit yet, the
4252 * previous parents are ready. If there are no
4253 * clock parents at all, there is nothing to wait
4254 * for, so we can consider their absence as ready.
4261 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
4262 * @np: Device node pointer associated with clock provider
4263 * @index: clock index
4264 * @flags: pointer to top-level framework flags
4266 * Detects if the clock-critical property exists and, if so, sets the
4267 * corresponding CLK_IS_CRITICAL flag.
4269 * Do not use this function. It exists only for legacy Device Tree
4270 * bindings, such as the one-clock-per-node style, which is outdated.
4271 * Those bindings typically put all clock data into .dts and the Linux
4272 * driver has no clock data, thus making it impossible to set this flag
4273 * correctly from the driver. Only those drivers may call
4274 * of_clk_detect_critical from their setup functions.
4276 * Return: error code or zero on success
4278 int of_clk_detect_critical(struct device_node *np,
4279 int index, unsigned long *flags)
4281 struct property *prop;
4288 of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
4290 *flags |= CLK_IS_CRITICAL;
4296 * of_clk_init() - Scan and init clock providers from the DT
4297 * @matches: array of compatible values and init functions for providers.
4299 * This function scans the device tree for matching clock providers
4300 * and calls their initialization functions. It does so while trying
4301 * to follow inter-provider dependencies.
4303 void __init of_clk_init(const struct of_device_id *matches)
4305 const struct of_device_id *match;
4306 struct device_node *np;
4307 struct clock_provider *clk_provider, *next;
4310 LIST_HEAD(clk_provider_list);
4313 matches = &__clk_of_table;
4315 /* First prepare the list of the clocks providers */
4316 for_each_matching_node_and_match(np, matches, &match) {
4317 struct clock_provider *parent;
4319 if (!of_device_is_available(np))
4322 parent = kzalloc(sizeof(*parent), GFP_KERNEL);
4324 list_for_each_entry_safe(clk_provider, next,
4325 &clk_provider_list, node) {
4326 list_del(&clk_provider->node);
4327 of_node_put(clk_provider->np);
4328 kfree(clk_provider);
4334 parent->clk_init_cb = match->data;
4335 parent->np = of_node_get(np);
4336 list_add_tail(&parent->node, &clk_provider_list);
4339 while (!list_empty(&clk_provider_list)) {
4340 is_init_done = false;
4341 list_for_each_entry_safe(clk_provider, next,
4342 &clk_provider_list, node) {
4343 if (force || parent_ready(clk_provider->np)) {
4345 /* Don't populate platform devices */
4346 of_node_set_flag(clk_provider->np,
4349 clk_provider->clk_init_cb(clk_provider->np);
4350 of_clk_set_defaults(clk_provider->np, true);
4352 list_del(&clk_provider->node);
4353 of_node_put(clk_provider->np);
4354 kfree(clk_provider);
4355 is_init_done = true;
4360 * We didn't manage to initialize any of the
4361 * remaining providers during the last loop, so now we
4362 * initialize all the remaining ones unconditionally
4363 * in case the clock parent was not mandatory