/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 * Copyright (C) 2005, 2006, 2007  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2005 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *	Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

/*
 * The ordering of these clocks matters; do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

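/*
 * Recalculate the rate of every child of @clk, recursing into any
 * child that itself propagates its rate further down the tree.
 */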
static void propagate_rate(struct clk *clk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->parent != clk))
			continue;
		if (likely(clkp->ops && clkp->ops->recalc))
			clkp->ops->recalc(clkp);
		if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
			propagate_rate(clkp);
	}
}

static int __clk_enable(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock; some
	 * clocks that are always enabled still require "special"
	 * initialization. This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 */

	if (clk->flags & CLK_ALWAYS_ENABLED) {
		kref_get(&clk->kref);
		return 0;
	}

	if (unlikely(atomic_read(&clk->kref.refcount) == 1))
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

	kref_get(&clk->kref);

	if (likely(clk->ops && clk->ops->enable))
		clk->ops->enable(clk);

	return 0;
}

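/*
 * clk_enable - enable @clk and, recursively, all of its parents.
 * Enables are refcounted through the embedded kref, so each successful
 * call must be balanced by a clk_disable().
 */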
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	clk_enable(clk->parent);

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static void clk_kref_release(struct kref *kref)
{
	/* Nothing to do */
}

static void __clk_disable(struct clk *clk)
{
	int count = kref_put(&clk->kref, clk_kref_release);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return;

	if (!count) {	/* no users left, disable the clock */
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
	}
}

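/*
 * clk_disable - drop one enable reference on @clk, then on each of its
 * parents in turn. The hardware is only switched off once the last
 * user's reference has been dropped.
 */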
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	clk_disable(clk->parent);
}
EXPORT_SYMBOL_GPL(clk_disable);

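/*
 * clk_register - add @clk to the global clock list and initialize its
 * refcount. Always-enabled clocks are initialized and switched on here,
 * as no clk_enable() call will ever arrive for them.
 */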
int clk_register(struct clk *clk)
{
	mutex_lock(&clock_list_sem);

	list_add(&clk->node, &clock_list);
	kref_init(&clk->kref);

	mutex_unlock(&clock_list_sem);

	if (clk->flags & CLK_ALWAYS_ENABLED) {
		pr_debug("Clock '%s' is ALWAYS_ENABLED\n", clk->name);
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);
		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
		pr_debug("Enabled.\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

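/*
 * clk_set_rate - set the rate of @clk. Shorthand for clk_set_rate_ex()
 * with an algo_id of 0; the algo_id is passed straight through to the
 * CPU-specific set_rate implementation.
 */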
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

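/*
 * clk_recalc_rate - re-read the current rate of @clk from the hardware
 * and propagate the result to any children that depend on it.
 */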
void clk_recalc_rate(struct clk *clk)
{
	if (likely(clk->ops && clk->ops->recalc)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		clk->ops->recalc(clk);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);

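/*
 * clk_set_parent - reparent @clk onto @parent through the clock's
 * set_parent op. If the op fails, the original parent is kept.
 */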
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -EINVAL;
	struct clk *old;

	if (!parent || !clk)
		return ret;

	old = clk->parent;
	if (likely(clk->ops && clk->ops->set_parent)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_parent(clk, parent);
		spin_unlock_irqrestore(&clock_lock, flags);
		clk->parent = (ret ? old : parent);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

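/*
 * clk_round_rate - ask the clock's round_rate op for the closest rate
 * to @rate that the hardware can actually produce. Clocks without a
 * round_rate op simply report their current rate.
 */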
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/*
 * Returns a clock. We first try to match on both the platform device id
 * and the clock name; if that fails, we fall back to matching on the
 * clock name alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);

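/*
 * Typical consumer usage of the above API (an illustrative sketch only;
 * "pdev" is some hypothetical platform device, and "module_clk" is just
 * one of the clock names this file registers):
 *
 *	struct clk *clk = clk_get(&pdev->dev, "module_clk");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		pr_info("running at %lu Hz\n", clk_get_rate(clk));
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */

/*
 * Weak no-op stubs; the CPU subtype code is expected to override these
 * to supply clk_ops for the core clocks and to register any additional
 * clocks of its own.
 */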
void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}

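/*
 * show_clocks - read handler for /proc/clocks; prints one line per
 * registered clock with its rate in MHz and whether it is enabled.
 */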
static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     ((clk->flags & CLK_ALWAYS_ENABLED) ||
			      (atomic_read(&clk->kref.refcount) != 1)) ?
			     "enabled" : "disabled");
	}

	return p - buf;
}

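/*
 * After resuming from hibernation the clock hardware is back in its
 * reset state, so replay each clock's parent and rate (or, failing a
 * set_rate op, recalc) through its clk_ops to restore the pre-freeze
 * configuration.
 */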
#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event == PM_EVENT_FREEZE) {
			list_for_each_entry(clkp, &clock_list, node)
				if (likely(clkp->ops)) {
					unsigned long rate = clkp->rate;

					if (likely(clkp->ops->set_parent))
						clkp->ops->set_parent(clkp,
							clkp->parent);
					if (likely(clkp->ops->set_rate))
						clkp->ops->set_rate(clkp,
							rate, NO_CHANGE);
					else if (likely(clkp->ops->recalc))
						clkp->ops->recalc(clkp);
				}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

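/*
 * clk_init - register the core CPG clocks, handing each to the CPU
 * subtype's arch_init_clk_ops() for its clk_ops, then let
 * arch_clk_init() register any remaining clocks before the initial
 * rate propagation.
 */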
int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks.. */
	propagate_rate(&master_clk);
	propagate_rate(&bus_clk);

	return ret;
}

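/*
 * Expose the registered clocks through a read-only /proc/clocks entry.
 */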
static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;

	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);