blob: 033f4662b59d9fa2843a723661149f8302d76948 [file] [log] [blame]
/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 * Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * With clkdev bits:
 *
 *	Copyright (C) 2008 Russell King.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/module.h>
Paul Mundt237b98f2006-09-27 17:28:20 +090024#include <linux/mutex.h>
Paul Mundt36ddf312006-01-16 22:14:17 -080025#include <linux/list.h>
Francesco VIRLINZI4a550262009-03-11 07:42:05 +000026#include <linux/kobject.h>
27#include <linux/sysdev.h>
Paul Mundt36ddf312006-01-16 22:14:17 -080028#include <linux/seq_file.h>
29#include <linux/err.h>
Paul Mundt1d118562006-12-01 13:15:14 +090030#include <linux/platform_device.h>
Paul Mundtdb62e5b2007-04-26 12:17:20 +090031#include <linux/proc_fs.h>
Paul Mundt36ddf312006-01-16 22:14:17 -080032#include <asm/clock.h>
Paul Mundt253b0882009-05-13 17:38:11 +090033#include <asm/machvec.h>
Paul Mundt36ddf312006-01-16 22:14:17 -080034
/* All registered clocks, linked via clk->node; guarded by clock_list_sem. */
static LIST_HEAD(clock_list);
/* IRQ-safe lock guarding rate/usecount updates (taken with spin_lock_irqsave). */
static DEFINE_SPINLOCK(clock_lock);
/* Serializes clock_list registration, unregistration and lookup. */
static DEFINE_MUTEX(clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -080038
Paul Mundta02cb232009-05-12 03:50:44 +090039/* Used for clocks that always have same value as the parent clock */
40unsigned long followparent_recalc(struct clk *clk)
41{
42 return clk->parent->rate;
43}
44
Paul Mundtaa87aa32009-05-12 05:51:05 +090045int clk_reparent(struct clk *child, struct clk *parent)
46{
47 list_del_init(&child->sibling);
48 if (parent)
49 list_add(&child->sibling, &parent->children);
50 child->parent = parent;
51
52 /* now do the debugfs renaming to reattach the child
53 to the proper parent */
54
55 return 0;
56}
57
Paul Mundtb1f6cfe2009-05-12 04:27:43 +090058/* Propagate rate to children */
59void propagate_rate(struct clk *tclk)
60{
61 struct clk *clkp;
62
63 list_for_each_entry(clkp, &tclk->children, sibling) {
Paul Mundtd672fef2009-05-13 17:03:09 +090064 if (clkp->ops && clkp->ops->recalc)
Paul Mundtb1f6cfe2009-05-12 04:27:43 +090065 clkp->rate = clkp->ops->recalc(clkp);
66 propagate_rate(clkp);
67 }
68}
69
Adrian Bunk4c1cfab2008-06-18 03:36:50 +030070static void __clk_disable(struct clk *clk)
Paul Mundt36ddf312006-01-16 22:14:17 -080071{
Paul Mundtae891a42009-05-12 05:30:10 +090072 if (clk->usecount == 0) {
73 printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
74 clk->name);
75 WARN_ON(1);
76 return;
77 }
78
79 if (!(--clk->usecount)) {
dmitry pervushin1929cb32007-04-24 13:39:09 +090080 if (likely(clk->ops && clk->ops->disable))
81 clk->ops->disable(clk);
Paul Mundt4ff29ff2009-05-12 05:14:53 +090082 if (likely(clk->parent))
83 __clk_disable(clk->parent);
dmitry pervushin1929cb32007-04-24 13:39:09 +090084 }
Paul Mundt36ddf312006-01-16 22:14:17 -080085}
86
87void clk_disable(struct clk *clk)
88{
89 unsigned long flags;
90
Paul Mundt4ff29ff2009-05-12 05:14:53 +090091 if (!clk)
92 return;
93
Paul Mundt36ddf312006-01-16 22:14:17 -080094 spin_lock_irqsave(&clock_lock, flags);
95 __clk_disable(clk);
96 spin_unlock_irqrestore(&clock_lock, flags);
97}
Paul Mundtdb62e5b2007-04-26 12:17:20 +090098EXPORT_SYMBOL_GPL(clk_disable);
Paul Mundt36ddf312006-01-16 22:14:17 -080099
Paul Mundtae891a42009-05-12 05:30:10 +0900100static int __clk_enable(struct clk *clk)
101{
102 int ret = 0;
103
104 if (clk->usecount++ == 0) {
105 if (clk->parent) {
106 ret = __clk_enable(clk->parent);
107 if (unlikely(ret))
108 goto err;
109 }
110
111 if (clk->ops && clk->ops->enable) {
112 ret = clk->ops->enable(clk);
113 if (ret) {
114 if (clk->parent)
115 __clk_disable(clk->parent);
116 goto err;
117 }
118 }
119 }
120
121 return ret;
122err:
123 clk->usecount--;
124 return ret;
125}
126
127int clk_enable(struct clk *clk)
128{
129 unsigned long flags;
130 int ret;
131
132 if (!clk)
133 return -EINVAL;
134
135 spin_lock_irqsave(&clock_lock, flags);
136 ret = __clk_enable(clk);
137 spin_unlock_irqrestore(&clock_lock, flags);
138
139 return ret;
140}
141EXPORT_SYMBOL_GPL(clk_enable);
142
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900143static LIST_HEAD(root_clks);
144
145/**
146 * recalculate_root_clocks - recalculate and propagate all root clocks
147 *
148 * Recalculates all root clocks (clocks with no parent), which if the
149 * clock's .recalc is set correctly, should also propagate their rates.
150 * Called at init.
151 */
152void recalculate_root_clocks(void)
153{
154 struct clk *clkp;
155
156 list_for_each_entry(clkp, &root_clks, sibling) {
Paul Mundtd672fef2009-05-13 17:03:09 +0900157 if (clkp->ops && clkp->ops->recalc)
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900158 clkp->rate = clkp->ops->recalc(clkp);
159 propagate_rate(clkp);
160 }
161}
162
Paul Mundt36ddf312006-01-16 22:14:17 -0800163int clk_register(struct clk *clk)
164{
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900165 if (clk == NULL || IS_ERR(clk))
166 return -EINVAL;
167
168 /*
169 * trap out already registered clocks
170 */
171 if (clk->node.next || clk->node.prev)
172 return 0;
173
Paul Mundt237b98f2006-09-27 17:28:20 +0900174 mutex_lock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800175
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900176 INIT_LIST_HEAD(&clk->children);
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900177 clk->usecount = 0;
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900178
179 if (clk->parent)
180 list_add(&clk->sibling, &clk->parent->children);
181 else
182 list_add(&clk->sibling, &root_clks);
183
Paul Mundt36ddf312006-01-16 22:14:17 -0800184 list_add(&clk->node, &clock_list);
Paul Mundtd672fef2009-05-13 17:03:09 +0900185 if (clk->ops && clk->ops->init)
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900186 clk->ops->init(clk);
Paul Mundt237b98f2006-09-27 17:28:20 +0900187 mutex_unlock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800188
189 return 0;
190}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900191EXPORT_SYMBOL_GPL(clk_register);
Paul Mundt36ddf312006-01-16 22:14:17 -0800192
193void clk_unregister(struct clk *clk)
194{
Paul Mundt237b98f2006-09-27 17:28:20 +0900195 mutex_lock(&clock_list_sem);
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900196 list_del(&clk->sibling);
Paul Mundt36ddf312006-01-16 22:14:17 -0800197 list_del(&clk->node);
Paul Mundt237b98f2006-09-27 17:28:20 +0900198 mutex_unlock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800199}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900200EXPORT_SYMBOL_GPL(clk_unregister);
Paul Mundt36ddf312006-01-16 22:14:17 -0800201
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900202static void clk_enable_init_clocks(void)
203{
204 struct clk *clkp;
205
206 list_for_each_entry(clkp, &clock_list, node)
207 if (clkp->flags & CLK_ENABLE_ON_INIT)
208 clk_enable(clkp);
209}
210
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900211unsigned long clk_get_rate(struct clk *clk)
Paul Mundt36ddf312006-01-16 22:14:17 -0800212{
213 return clk->rate;
214}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900215EXPORT_SYMBOL_GPL(clk_get_rate);
Paul Mundt36ddf312006-01-16 22:14:17 -0800216
/* Set @clk's rate with the default (0) algorithm ID. */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
dmitry pervushin1929cb32007-04-24 13:39:09 +0900222
223int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
224{
Paul Mundt36ddf312006-01-16 22:14:17 -0800225 int ret = -EOPNOTSUPP;
Paul Mundt100890c2009-05-13 17:05:51 +0900226 unsigned long flags;
227
228 spin_lock_irqsave(&clock_lock, flags);
Paul Mundt36ddf312006-01-16 22:14:17 -0800229
230 if (likely(clk->ops && clk->ops->set_rate)) {
dmitry pervushin1929cb32007-04-24 13:39:09 +0900231 ret = clk->ops->set_rate(clk, rate, algo_id);
Paul Mundt100890c2009-05-13 17:05:51 +0900232 if (ret != 0)
233 goto out_unlock;
234 } else {
235 clk->rate = rate;
236 ret = 0;
Paul Mundt36ddf312006-01-16 22:14:17 -0800237 }
238
Paul Mundt100890c2009-05-13 17:05:51 +0900239 if (clk->ops && clk->ops->recalc)
240 clk->rate = clk->ops->recalc(clk);
241
242 propagate_rate(clk);
243
244out_unlock:
245 spin_unlock_irqrestore(&clock_lock, flags);
246
Paul Mundt36ddf312006-01-16 22:14:17 -0800247 return ret;
248}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900249EXPORT_SYMBOL_GPL(clk_set_rate_ex);
Paul Mundt36ddf312006-01-16 22:14:17 -0800250
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000251int clk_set_parent(struct clk *clk, struct clk *parent)
252{
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900253 unsigned long flags;
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000254 int ret = -EINVAL;
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000255
256 if (!parent || !clk)
257 return ret;
Paul Mundtaa87aa32009-05-12 05:51:05 +0900258 if (clk->parent == parent)
259 return 0;
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000260
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900261 spin_lock_irqsave(&clock_lock, flags);
262 if (clk->usecount == 0) {
263 if (clk->ops->set_parent)
264 ret = clk->ops->set_parent(clk, parent);
Paul Mundtaa87aa32009-05-12 05:51:05 +0900265 else
266 ret = clk_reparent(clk, parent);
267
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900268 if (ret == 0) {
Paul Mundtaa87aa32009-05-12 05:51:05 +0900269 pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
270 clk->name, clk->parent->name, clk->rate);
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900271 if (clk->ops->recalc)
272 clk->rate = clk->ops->recalc(clk);
273 propagate_rate(clk);
274 }
275 } else
276 ret = -EBUSY;
277 spin_unlock_irqrestore(&clock_lock, flags);
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000278
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000279 return ret;
280}
281EXPORT_SYMBOL_GPL(clk_set_parent);
282
283struct clk *clk_get_parent(struct clk *clk)
284{
285 return clk->parent;
286}
287EXPORT_SYMBOL_GPL(clk_get_parent);
288
Paul Mundtf6991b02007-07-20 13:29:09 +0900289long clk_round_rate(struct clk *clk, unsigned long rate)
290{
291 if (likely(clk->ops && clk->ops->round_rate)) {
292 unsigned long flags, rounded;
293
294 spin_lock_irqsave(&clock_lock, flags);
295 rounded = clk->ops->round_rate(clk, rate);
296 spin_unlock_irqrestore(&clock_lock, flags);
297
298 return rounded;
299 }
300
301 return clk_get_rate(clk);
302}
303EXPORT_SYMBOL_GPL(clk_round_rate);
304
Paul Mundt1d118562006-12-01 13:15:14 +0900305/*
Paul Mundt0dae8952009-05-12 06:18:09 +0900306 * Find the correct struct clk for the device and connection ID.
307 * We do slightly fuzzy matching here:
308 * An entry with a NULL ID is assumed to be a wildcard.
309 * If an entry has a device ID, it must match
310 * If an entry has a connection ID, it must match
311 * Then we take the most specific entry - with the following
312 * order of precidence: dev+con > dev only > con only.
313 */
314static struct clk *clk_find(const char *dev_id, const char *con_id)
315{
316 struct clk_lookup *p;
317 struct clk *clk = NULL;
318 int match, best = 0;
319
320 list_for_each_entry(p, &clock_list, node) {
321 match = 0;
322 if (p->dev_id) {
323 if (!dev_id || strcmp(p->dev_id, dev_id))
324 continue;
325 match += 2;
326 }
327 if (p->con_id) {
328 if (!con_id || strcmp(p->con_id, con_id))
329 continue;
330 match += 1;
331 }
332 if (match == 0)
333 continue;
334
335 if (match > best) {
336 clk = p->clk;
337 best = match;
338 }
339 }
340 return clk;
341}
342
343struct clk *clk_get_sys(const char *dev_id, const char *con_id)
344{
345 struct clk *clk;
346
347 mutex_lock(&clock_list_sem);
348 clk = clk_find(dev_id, con_id);
349 mutex_unlock(&clock_list_sem);
350
351 return clk ? clk : ERR_PTR(-ENOENT);
352}
353EXPORT_SYMBOL_GPL(clk_get_sys);
354
355/*
Paul Mundt1d118562006-12-01 13:15:14 +0900356 * Returns a clock. Note that we first try to use device id on the bus
357 * and clock name. If this fails, we try to use clock name only.
358 */
359struct clk *clk_get(struct device *dev, const char *id)
Paul Mundt36ddf312006-01-16 22:14:17 -0800360{
Paul Mundt0dae8952009-05-12 06:18:09 +0900361 const char *dev_id = dev ? dev_name(dev) : NULL;
Paul Mundt36ddf312006-01-16 22:14:17 -0800362 struct clk *p, *clk = ERR_PTR(-ENOENT);
Paul Mundt1d118562006-12-01 13:15:14 +0900363 int idno;
364
Paul Mundt0dae8952009-05-12 06:18:09 +0900365 clk = clk_get_sys(dev_id, id);
Paul Mundtf3f82902009-05-12 16:07:40 +0900366 if (clk && !IS_ERR(clk))
Paul Mundt0dae8952009-05-12 06:18:09 +0900367 return clk;
368
Paul Mundt1d118562006-12-01 13:15:14 +0900369 if (dev == NULL || dev->bus != &platform_bus_type)
370 idno = -1;
371 else
372 idno = to_platform_device(dev)->id;
Paul Mundt36ddf312006-01-16 22:14:17 -0800373
Paul Mundt237b98f2006-09-27 17:28:20 +0900374 mutex_lock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800375 list_for_each_entry(p, &clock_list, node) {
Paul Mundt1d118562006-12-01 13:15:14 +0900376 if (p->id == idno &&
377 strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
378 clk = p;
379 goto found;
380 }
381 }
382
383 list_for_each_entry(p, &clock_list, node) {
Paul Mundt36ddf312006-01-16 22:14:17 -0800384 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
385 clk = p;
386 break;
387 }
388 }
Paul Mundt1d118562006-12-01 13:15:14 +0900389
390found:
Paul Mundt237b98f2006-09-27 17:28:20 +0900391 mutex_unlock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800392
393 return clk;
394}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900395EXPORT_SYMBOL_GPL(clk_get);
Paul Mundt36ddf312006-01-16 22:14:17 -0800396
397void clk_put(struct clk *clk)
398{
399 if (clk && !IS_ERR(clk))
400 module_put(clk->owner);
401}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900402EXPORT_SYMBOL_GPL(clk_put);
Paul Mundt36ddf312006-01-16 22:14:17 -0800403
dmitry pervushindfbbbe92007-05-15 08:42:22 +0900404
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900405static int show_clocks(char *buf, char **start, off_t off,
406 int len, int *eof, void *data)
407{
408 struct clk *clk;
409 char *p = buf;
410
411 list_for_each_entry_reverse(clk, &clock_list, node) {
412 unsigned long rate = clk_get_rate(clk);
413
Magnus Damm152fe362008-07-17 19:05:54 +0900414 p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
415 rate / 1000000, (rate % 1000000) / 10000,
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900416 (clk->usecount > 0) ? "enabled" : "disabled");
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900417 }
418
419 return p - buf;
420}
421
Francesco VIRLINZI4a550262009-03-11 07:42:05 +0000422#ifdef CONFIG_PM
423static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
424{
425 static pm_message_t prev_state;
426 struct clk *clkp;
427
428 switch (state.event) {
429 case PM_EVENT_ON:
430 /* Resumeing from hibernation */
Paul Mundtb68d8202009-05-12 03:45:08 +0900431 if (prev_state.event != PM_EVENT_FREEZE)
432 break;
Francesco VIRLINZI50cca712009-03-13 08:08:01 +0000433
Paul Mundtb68d8202009-05-12 03:45:08 +0900434 list_for_each_entry(clkp, &clock_list, node) {
435 if (likely(clkp->ops)) {
436 unsigned long rate = clkp->rate;
437
438 if (likely(clkp->ops->set_parent))
439 clkp->ops->set_parent(clkp,
440 clkp->parent);
441 if (likely(clkp->ops->set_rate))
442 clkp->ops->set_rate(clkp,
443 rate, NO_CHANGE);
444 else if (likely(clkp->ops->recalc))
445 clkp->rate = clkp->ops->recalc(clkp);
446 }
Francesco VIRLINZI4a550262009-03-11 07:42:05 +0000447 }
448 break;
449 case PM_EVENT_FREEZE:
450 break;
451 case PM_EVENT_SUSPEND:
452 break;
453 }
454
455 prev_state = state;
456 return 0;
457}
458
459static int clks_sysdev_resume(struct sys_device *dev)
460{
461 return clks_sysdev_suspend(dev, PMSG_ON);
462}
463
/* sysdev class/driver/device triple exposing the clock PM hooks. */
static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};
476
477static int __init clk_sysdev_init(void)
478{
479 sysdev_class_register(&clks_sysdev_class);
480 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
481 sysdev_register(&clks_sysdev_dev);
482
483 return 0;
484}
485subsys_initcall(clk_sysdev_init);
486#endif
487
Paul Mundt36ddf312006-01-16 22:14:17 -0800488int __init clk_init(void)
489{
Paul Mundt253b0882009-05-13 17:38:11 +0900490 int ret;
Paul Mundt36ddf312006-01-16 22:14:17 -0800491
Paul Mundt253b0882009-05-13 17:38:11 +0900492 ret = arch_clk_init();
493 if (unlikely(ret)) {
494 pr_err("%s: CPU clock registration failed.\n", __func__);
495 return ret;
Paul Mundt36ddf312006-01-16 22:14:17 -0800496 }
497
Paul Mundt253b0882009-05-13 17:38:11 +0900498 if (sh_mv.mv_clk_init) {
499 ret = sh_mv.mv_clk_init();
500 if (unlikely(ret)) {
501 pr_err("%s: machvec clock initialization failed.\n",
502 __func__);
503 return ret;
504 }
505 }
dmitry pervushindfbbbe92007-05-15 08:42:22 +0900506
Paul Mundt36ddf312006-01-16 22:14:17 -0800507 /* Kick the child clocks.. */
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900508 recalculate_root_clocks();
Paul Mundt36ddf312006-01-16 22:14:17 -0800509
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900510 /* Enable the necessary init clocks */
511 clk_enable_init_clocks();
512
Paul Mundt36ddf312006-01-16 22:14:17 -0800513 return ret;
514}
515
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900516static int __init clk_proc_init(void)
Paul Mundt36ddf312006-01-16 22:14:17 -0800517{
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900518 struct proc_dir_entry *p;
519 p = create_proc_read_entry("clocks", S_IRUSR, NULL,
520 show_clocks, NULL);
521 if (unlikely(!p))
522 return -EINVAL;
Paul Mundt36ddf312006-01-16 22:14:17 -0800523
524 return 0;
525}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900526subsys_initcall(clk_proc_init);