/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005, 2006  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2005 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

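/*
 * Recalculate the rate of every clock whose parent is "clk". Children are
 * found by walking the global clock_list; each one that provides a ->recalc
 * op derives its new rate from the parent.
 */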
static void propagate_rate(struct clk *clk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->parent != clk))
			continue;
		if (likely(clkp->ops && clkp->ops->recalc))
			clkp->ops->recalc(clkp);
	}
}

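/*
 * Unlocked enable path. clk_enable() below wraps this with clock_lock
 * held; __clk_enable() is also exported for callers that take care of
 * the locking themselves.
 */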
int __clk_enable(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock, some
	 * clocks that are always enabled still require "special"
	 * initialization. This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 */
	if (unlikely(atomic_read(&clk->kref.refcount) == 1))
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return 0;

	if (likely(clk->ops && clk->ops->enable))
		clk->ops->enable(clk);

	kref_get(&clk->kref);
	return 0;
}

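/*
 * Enable a clock, taking clock_lock with interrupts disabled so that this
 * is safe to call from any context.
 */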
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}

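/*
 * The kref only tracks how many times a clock has been enabled; nothing
 * needs to be torn down when the last reference is dropped, so the
 * release callback is intentionally empty.
 */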
static void clk_kref_release(struct kref *kref)
{
	/* Nothing to do */
}

void __clk_disable(struct clk *clk)
{
	if (clk->flags & CLK_ALWAYS_ENABLED)
		return;

	kref_put(&clk->kref, clk_kref_release);
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}

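/*
 * Add a clock to the global clock_list and initialize its refcount. The
 * list itself is protected by clock_list_sem (a mutex), since registration
 * and lookup only happen from process context.
 */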
int clk_register(struct clk *clk)
{
	mutex_lock(&clock_list_sem);

	list_add(&clk->node, &clock_list);
	kref_init(&clk->kref);

	mutex_unlock(&clock_list_sem);

	return 0;
}

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}

inline unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}

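/*
 * Set a clock's rate through its ->set_rate op (under clock_lock), then
 * propagate the change to any child clocks if CLK_RATE_PROPAGATES is set.
 * Clocks without a ->set_rate op return -EOPNOTSUPP.
 */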
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}

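/*
 * Recalculate a clock's rate via its ->recalc op and propagate the result
 * down the tree if CLK_RATE_PROPAGATES is set.
 */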
void clk_recalc_rate(struct clk *clk)
{
	if (likely(clk->ops && clk->ops->recalc)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		clk->ops->recalc(clk);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
}

/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
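
/*
 * Illustrative driver usage (a rough sketch, not taken from an in-tree
 * driver; the "module_clk" name, pdev, and the error handling are only
 * examples):
 *
 *	struct clk *clk = clk_get(&pdev->dev, "module_clk");
 *	unsigned long rate;
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	clk_enable(clk);
 *	rate = clk_get_rate(clk);
 *	...
 *	clk_disable(clk);
 *	clk_put(clk);
 */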

void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

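/*
 * Register and enable the core CPG clocks at boot. arch_init_clk_ops()
 * above is a weak no-op hook that CPU subtype code overrides to supply
 * the clk_ops for each of the four clocks; propagate_rate() then fills
 * in the derived rates below master_clk and bus_clk.
 */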
int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
		clk_enable(clk);
	}

	/* Kick the child clocks.. */
	propagate_rate(&master_clk);
	propagate_rate(&bus_clk);

	return ret;
}

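/*
 * Dump the registered clocks and their rates in MHz, skipping dummy
 * clocks that have neither a rate nor a parent.
 */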
int show_clocks(struct seq_file *m)
{
	struct clk *clk;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		/*
		 * Don't bother listing dummy clocks with no ancestry
		 * that only support enable and disable ops.
		 */
		if (unlikely(!rate && !clk->parent))
			continue;

		seq_printf(m, "%-12s\t: %ld.%02ldMHz\n", clk->name,
			   rate / 1000000, (rate % 1000000) / 10000);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(clk_register);
EXPORT_SYMBOL_GPL(clk_unregister);
EXPORT_SYMBOL_GPL(clk_get);
EXPORT_SYMBOL_GPL(clk_put);
EXPORT_SYMBOL_GPL(clk_enable);
EXPORT_SYMBOL_GPL(clk_disable);
EXPORT_SYMBOL_GPL(__clk_enable);
EXPORT_SYMBOL_GPL(__clk_disable);
EXPORT_SYMBOL_GPL(clk_get_rate);
EXPORT_SYMBOL_GPL(clk_set_rate);
EXPORT_SYMBOL_GPL(clk_recalc_rate);