/*
 * drivers/sh/clk.c - SuperH clock framework
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
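
/*
 * Usage sketch (not part of the original file): a CPG/board clock driver
 * would typically call clk_rate_table_build() from its clk_ops .init hook.
 * The divisor table, array sizes and function name below are hypothetical;
 * the extra freq_table slot holds the termination entry.
 *
 *	static int example_divisors[] = { 1, 2, 3, 4, 6, 8, 12, 16 };
 *
 *	static struct clk_div_mult_table example_div_table = {
 *		.divisors	= example_divisors,
 *		.nr_divisors	= ARRAY_SIZE(example_divisors),
 *	};
 *
 *	static struct cpufreq_frequency_table
 *			example_freqs[ARRAY_SIZE(example_divisors) + 1];
 *
 *	static void example_clk_init(struct clk *clk)
 *	{
 *		clk_rate_table_build(clk, example_freqs,
 *				     ARRAY_SIZE(example_divisors),
 *				     &example_div_table, NULL);
 *	}
 */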

struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos < r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long rate_best_fit = rounder->rate;
	unsigned long highest, lowest, freq;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	return clk_rate_round_helper(&table_round);
}
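
/*
 * Usage sketch (illustrative, not from this file): a divider clock's
 * .round_rate op can defer directly to clk_rate_table_round() on the
 * table built above; "example_div_round_rate" is a hypothetical name.
 *
 *	static long example_div_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_table_round(clk, clk->freq_table, rate);
 *	}
 */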

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}
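
/*
 * Usage sketch (illustrative, not from this file): a clock with a plain
 * 1..64 programmable divider and no pre-built frequency table can round
 * against the divisor range directly; the name and range are made up.
 *
 *	static long example_frqcr_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 */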

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}
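
/*
 * Usage sketch (illustrative, not from this file): a gate-only clock that
 * always runs at its parent's rate can plug followparent_recalc() straight
 * into its ops; the enable/disable callbacks here are hypothetical.
 *
 *	static struct clk_ops example_gate_clk_ops = {
 *		.enable		= example_gate_enable,
 *		.disable	= example_gate_disable,
 *		.recalc		= followparent_recalc,
 *	};
 */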

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
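
/*
 * Usage sketch (illustrative, not from this file): typical consumer-side
 * pairing of clk_enable()/clk_disable(); the device pointer and connection
 * id ("peripheral_clk") are hypothetical.
 *
 *	struct clk *clk = clk_get(&pdev->dev, "peripheral_clk");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */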

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}
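
/*
 * Usage sketch (illustrative, not from this file): a root clock can
 * describe its register window with a clk_mapping so that its children
 * inherit the ioremapped base; the physical address, length and clock
 * names are made-up values.
 *
 *	static struct clk_mapping example_cpg_mapping = {
 *		.phys	= 0xffc80000,
 *		.len	= 0x100,
 *	};
 *
 *	static struct clk example_root_clk = {
 *		.ops		= &example_root_clk_ops,
 *		.mapping	= &example_cpg_mapping,
 *	};
 */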

static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}

int clk_register(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
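
/*
 * Usage sketch (illustrative, not from this file): CPU/board setup code
 * usually registers an array of clocks in one pass; all names here are
 * hypothetical.
 *
 *	static struct clk *example_clks[] = {
 *		&example_root_clk,
 *		&example_peripheral_clk,
 *	};
 *
 *	static int __init example_clk_setup(void)
 *	{
 *		int i, ret = 0;
 *
 *		for (i = 0; i < ARRAY_SIZE(example_clks); i++)
 *			ret |= clk_register(example_clks[i]);
 *
 *		return ret;
 *	}
 */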

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
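
/*
 * Usage sketch (illustrative, not from this file): callers normally round
 * a requested rate before committing to it; the 33.33 MHz value is
 * arbitrary.
 *
 *	long rounded = clk_round_rate(clk, 33333333);
 *	if (rounded > 0)
 *		clk_set_rate(clk, rounded);
 */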

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);