/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 * Copyright (C) 2005 - 2009 Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * With clkdev bits:
 *
 *	Copyright (C) 2008 Russell King.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>

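/*
 * Locking: clock_lock is an irq-safe spinlock that serializes rate,
 * usecount and parent updates, while clock_list_sem protects walks and
 * updates of the global clock list below.
 */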
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least.  These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk peripheral_clk = {
	.name		= "peripheral_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};

/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

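/*
 * Detach 'child' from its current parent's children list and hook it
 * onto 'parent' (or leave it detached when parent is NULL), updating
 * the cached parent pointer.
 */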
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

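/*
 * Drop one reference on the clock. The hardware disable op is only
 * invoked once the usecount reaches zero, at which point the parent
 * is released as well. Caller must hold clock_lock.
 */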
static void __clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		return;
	}

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

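/*
 * Take one reference on the clock. On the 0 -> 1 transition the parent
 * chain is enabled first, then the clock's own enable op; any failure
 * unwinds the parent and the usecount. Caller must hold clock_lock.
 */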
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

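/*
 * Add a clock to the framework: initialize its child list and usecount,
 * attach it to its parent (or to the root clock list), put it on the
 * global clock list and give the ops a chance to run their init hook.
 */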
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops->init)
		clk->ops->init(clk);
	mutex_unlock(&clock_list_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

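/* Enable every registered clock marked CLK_ENABLE_ON_INIT. */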
static void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

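/*
 * Rate changes go through the ops->set_rate hook with an optional
 * algorithm ID; on success the cached rate is recalculated and the new
 * rate is propagated down to all child clocks.
 */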
int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

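/*
 * Reparenting is only permitted while the clock is unused (usecount of
 * zero); otherwise -EBUSY is returned. A successful switch recalculates
 * the rate and propagates it to the children.
 */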
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
				 clk->name, clk->parent->name, clk->rate);
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

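/*
 * Ask the clock's round_rate op for the closest supported rate; clocks
 * without one simply report their current rate.
 */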
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match
 *  If an entry has a connection ID, it must match
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clock_list, node) {
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}

struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	struct clk *clk;

	mutex_lock(&clock_list_sem);
	clk = clk_find(dev_id, con_id);
	mutex_unlock(&clock_list_sem);

	return clk ? clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(clk_get_sys);

/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	clk = clk_get_sys(dev_id, id);
	if (clk && !IS_ERR(clk))
		return clk;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);

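/*
 * Weak stub so that platforms with no additional clocks still link;
 * CPU-specific code overrides this to register its own clock sources.
 */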
int __init __weak arch_clk_init(void)
{
	return 0;
}

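/*
 * /proc/clocks read handler: one line per registered clock with its
 * rate in MHz and whether it is currently enabled.
 */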
static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     (clk->usecount > 0) ? "enabled" : "disabled");
	}

	return p - buf;
}

#ifdef CONFIG_PM
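/*
 * On resume from hibernation (PM_EVENT_ON following PM_EVENT_FREEZE),
 * reprogram each clock's parent and rate from the cached values so the
 * hardware matches the pre-hibernation state.
 */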
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

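/*
 * Register the core CPG clocks, let the CPU-specific code add its own,
 * then recalculate the root clock rates and enable the clocks that must
 * stay on from boot.
 */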
int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks.. */
	recalculate_root_clocks();

	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

	return ret;
}

static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;
	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);