blob: 0a7755cc8a25b25115d755e0bfa5a5a10a41bd66 [file] [log] [blame]
Paul Mundt36ddf312006-01-16 22:14:17 -08001/*
2 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
3 *
Paul Mundtb1f6cfe2009-05-12 04:27:43 +09004 * Copyright (C) 2005 - 2009 Paul Mundt
Paul Mundt36ddf312006-01-16 22:14:17 -08005 *
6 * This clock framework is derived from the OMAP version by:
7 *
Paul Mundtb1f6cfe2009-05-12 04:27:43 +09008 * Copyright (C) 2004 - 2008 Nokia Corporation
Paul Mundt36ddf312006-01-16 22:14:17 -08009 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
10 *
Paul Mundt1d118562006-12-01 13:15:14 +090011 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
12 *
Paul Mundt0dae8952009-05-12 06:18:09 +090013 * With clkdev bits:
14 *
15 * Copyright (C) 2008 Russell King.
16 *
Paul Mundt36ddf312006-01-16 22:14:17 -080017 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file "COPYING" in the main directory of this archive
19 * for more details.
20 */
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/module.h>
Paul Mundt237b98f2006-09-27 17:28:20 +090024#include <linux/mutex.h>
Paul Mundt36ddf312006-01-16 22:14:17 -080025#include <linux/list.h>
Francesco VIRLINZI4a550262009-03-11 07:42:05 +000026#include <linux/kobject.h>
27#include <linux/sysdev.h>
Paul Mundt36ddf312006-01-16 22:14:17 -080028#include <linux/seq_file.h>
29#include <linux/err.h>
Paul Mundt1d118562006-12-01 13:15:14 +090030#include <linux/platform_device.h>
Paul Mundtdb62e5b2007-04-26 12:17:20 +090031#include <linux/proc_fs.h>
Paul Mundt36ddf312006-01-16 22:14:17 -080032#include <asm/clock.h>
Paul Mundt36ddf312006-01-16 22:14:17 -080033
/* All registered clocks, in registration order. */
static LIST_HEAD(clock_list);
/* Protects rate/usecount updates and ops callbacks (taken irqsave). */
static DEFINE_SPINLOCK(clock_lock);
/* Serializes clock_list modification and lookup (clk_register/clk_get). */
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk peripheral_clk = {
	.name		= "peripheral_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 * clk_init() indexes this array and passes the index to
 * arch_init_clk_ops(), so position encodes clock identity.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};
80
Paul Mundta02cb232009-05-12 03:50:44 +090081/* Used for clocks that always have same value as the parent clock */
82unsigned long followparent_recalc(struct clk *clk)
83{
84 return clk->parent->rate;
85}
86
Paul Mundtaa87aa32009-05-12 05:51:05 +090087int clk_reparent(struct clk *child, struct clk *parent)
88{
89 list_del_init(&child->sibling);
90 if (parent)
91 list_add(&child->sibling, &parent->children);
92 child->parent = parent;
93
94 /* now do the debugfs renaming to reattach the child
95 to the proper parent */
96
97 return 0;
98}
99
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900100/* Propagate rate to children */
101void propagate_rate(struct clk *tclk)
102{
103 struct clk *clkp;
104
105 list_for_each_entry(clkp, &tclk->children, sibling) {
Paul Mundtd672fef2009-05-13 17:03:09 +0900106 if (clkp->ops && clkp->ops->recalc)
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900107 clkp->rate = clkp->ops->recalc(clkp);
108 propagate_rate(clkp);
109 }
110}
111
Adrian Bunk4c1cfab2008-06-18 03:36:50 +0300112static void __clk_disable(struct clk *clk)
Paul Mundt36ddf312006-01-16 22:14:17 -0800113{
Paul Mundtae891a42009-05-12 05:30:10 +0900114 if (clk->usecount == 0) {
115 printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
116 clk->name);
117 WARN_ON(1);
118 return;
119 }
120
121 if (!(--clk->usecount)) {
dmitry pervushin1929cb32007-04-24 13:39:09 +0900122 if (likely(clk->ops && clk->ops->disable))
123 clk->ops->disable(clk);
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900124 if (likely(clk->parent))
125 __clk_disable(clk->parent);
dmitry pervushin1929cb32007-04-24 13:39:09 +0900126 }
Paul Mundt36ddf312006-01-16 22:14:17 -0800127}
128
129void clk_disable(struct clk *clk)
130{
131 unsigned long flags;
132
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900133 if (!clk)
134 return;
135
Paul Mundt36ddf312006-01-16 22:14:17 -0800136 spin_lock_irqsave(&clock_lock, flags);
137 __clk_disable(clk);
138 spin_unlock_irqrestore(&clock_lock, flags);
139}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900140EXPORT_SYMBOL_GPL(clk_disable);
Paul Mundt36ddf312006-01-16 22:14:17 -0800141
/*
 * Take one reference on @clk; on the 0->1 transition the parent is
 * enabled first, then this clock's ops->enable() runs.  Caller holds
 * clock_lock.  Returns 0 on success or the failing callback's error.
 */
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	/* Only the first user actually has to turn hardware on. */
	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				/* Undo the parent reference taken above. */
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	/* Roll back the optimistic increment so the count stays balanced. */
	clk->usecount--;
	return ret;
}
168
169int clk_enable(struct clk *clk)
170{
171 unsigned long flags;
172 int ret;
173
174 if (!clk)
175 return -EINVAL;
176
177 spin_lock_irqsave(&clock_lock, flags);
178 ret = __clk_enable(clk);
179 spin_unlock_irqrestore(&clock_lock, flags);
180
181 return ret;
182}
183EXPORT_SYMBOL_GPL(clk_enable);
184
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900185static LIST_HEAD(root_clks);
186
187/**
188 * recalculate_root_clocks - recalculate and propagate all root clocks
189 *
190 * Recalculates all root clocks (clocks with no parent), which if the
191 * clock's .recalc is set correctly, should also propagate their rates.
192 * Called at init.
193 */
194void recalculate_root_clocks(void)
195{
196 struct clk *clkp;
197
198 list_for_each_entry(clkp, &root_clks, sibling) {
Paul Mundtd672fef2009-05-13 17:03:09 +0900199 if (clkp->ops && clkp->ops->recalc)
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900200 clkp->rate = clkp->ops->recalc(clkp);
201 propagate_rate(clkp);
202 }
203}
204
Paul Mundt36ddf312006-01-16 22:14:17 -0800205int clk_register(struct clk *clk)
206{
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900207 if (clk == NULL || IS_ERR(clk))
208 return -EINVAL;
209
210 /*
211 * trap out already registered clocks
212 */
213 if (clk->node.next || clk->node.prev)
214 return 0;
215
Paul Mundt237b98f2006-09-27 17:28:20 +0900216 mutex_lock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800217
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900218 INIT_LIST_HEAD(&clk->children);
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900219 clk->usecount = 0;
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900220
221 if (clk->parent)
222 list_add(&clk->sibling, &clk->parent->children);
223 else
224 list_add(&clk->sibling, &root_clks);
225
Paul Mundt36ddf312006-01-16 22:14:17 -0800226 list_add(&clk->node, &clock_list);
Paul Mundtd672fef2009-05-13 17:03:09 +0900227 if (clk->ops && clk->ops->init)
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900228 clk->ops->init(clk);
Paul Mundt237b98f2006-09-27 17:28:20 +0900229 mutex_unlock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800230
231 return 0;
232}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900233EXPORT_SYMBOL_GPL(clk_register);
Paul Mundt36ddf312006-01-16 22:14:17 -0800234
235void clk_unregister(struct clk *clk)
236{
Paul Mundt237b98f2006-09-27 17:28:20 +0900237 mutex_lock(&clock_list_sem);
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900238 list_del(&clk->sibling);
Paul Mundt36ddf312006-01-16 22:14:17 -0800239 list_del(&clk->node);
Paul Mundt237b98f2006-09-27 17:28:20 +0900240 mutex_unlock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800241}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900242EXPORT_SYMBOL_GPL(clk_unregister);
Paul Mundt36ddf312006-01-16 22:14:17 -0800243
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900244static void clk_enable_init_clocks(void)
245{
246 struct clk *clkp;
247
248 list_for_each_entry(clkp, &clock_list, node)
249 if (clkp->flags & CLK_ENABLE_ON_INIT)
250 clk_enable(clkp);
251}
252
/* Return the cached rate of @clk in Hz. No locking; reads a single word. */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
Paul Mundt36ddf312006-01-16 22:14:17 -0800258
/* Set @clk's rate using the default algorithm (algo_id 0). */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
dmitry pervushin1929cb32007-04-24 13:39:09 +0900264
/*
 * Set @clk's rate, passing @algo_id through to the clock's
 * ops->set_rate() implementation.  Clocks without a set_rate op just
 * have their cached rate overwritten.  On success the new rate is
 * re-read via ops->recalc() (when present) and propagated to all
 * children.  Returns 0, the set_rate error, or -EOPNOTSUPP.
 */
int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;	/* hardware refused; don't touch children */
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);
Paul Mundt36ddf312006-01-16 22:14:17 -0800292
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000293int clk_set_parent(struct clk *clk, struct clk *parent)
294{
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900295 unsigned long flags;
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000296 int ret = -EINVAL;
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000297
298 if (!parent || !clk)
299 return ret;
Paul Mundtaa87aa32009-05-12 05:51:05 +0900300 if (clk->parent == parent)
301 return 0;
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000302
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900303 spin_lock_irqsave(&clock_lock, flags);
304 if (clk->usecount == 0) {
305 if (clk->ops->set_parent)
306 ret = clk->ops->set_parent(clk, parent);
Paul Mundtaa87aa32009-05-12 05:51:05 +0900307 else
308 ret = clk_reparent(clk, parent);
309
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900310 if (ret == 0) {
Paul Mundtaa87aa32009-05-12 05:51:05 +0900311 pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
312 clk->name, clk->parent->name, clk->rate);
Paul Mundtb1f6cfe2009-05-12 04:27:43 +0900313 if (clk->ops->recalc)
314 clk->rate = clk->ops->recalc(clk);
315 propagate_rate(clk);
316 }
317 } else
318 ret = -EBUSY;
319 spin_unlock_irqrestore(&clock_lock, flags);
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000320
Francesco VIRLINZId680c762009-03-11 07:40:54 +0000321 return ret;
322}
323EXPORT_SYMBOL_GPL(clk_set_parent);
324
/* Return @clk's parent clock, or NULL for a root clock. */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
330
Paul Mundtf6991b02007-07-20 13:29:09 +0900331long clk_round_rate(struct clk *clk, unsigned long rate)
332{
333 if (likely(clk->ops && clk->ops->round_rate)) {
334 unsigned long flags, rounded;
335
336 spin_lock_irqsave(&clock_lock, flags);
337 rounded = clk->ops->round_rate(clk, rate);
338 spin_unlock_irqrestore(&clock_lock, flags);
339
340 return rounded;
341 }
342
343 return clk_get_rate(clk);
344}
345EXPORT_SYMBOL_GPL(clk_round_rate);
346
/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match
 *  If an entry has a connection ID, it must match
 * Then we take the most specific entry - with the following
 * order of precidence: dev+con > dev only > con only.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	/*
	 * NOTE(review): this walks clock_list (a list of struct clk linked
	 * via ->node) through a struct clk_lookup cursor — presumably the
	 * lookup fields live at a compatible offset inside struct clk;
	 * verify against the struct clk definition in <asm/clock.h>.
	 */
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clock_list, node) {
		/* Score: dev match is worth 2, con match 1; a specified
		 * id that does not match disqualifies the entry. */
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		/* Keep the highest-scoring (most specific) entry seen. */
		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}
384
385struct clk *clk_get_sys(const char *dev_id, const char *con_id)
386{
387 struct clk *clk;
388
389 mutex_lock(&clock_list_sem);
390 clk = clk_find(dev_id, con_id);
391 mutex_unlock(&clock_list_sem);
392
393 return clk ? clk : ERR_PTR(-ENOENT);
394}
395EXPORT_SYMBOL_GPL(clk_get_sys);
396
397/*
Paul Mundt1d118562006-12-01 13:15:14 +0900398 * Returns a clock. Note that we first try to use device id on the bus
399 * and clock name. If this fails, we try to use clock name only.
400 */
401struct clk *clk_get(struct device *dev, const char *id)
Paul Mundt36ddf312006-01-16 22:14:17 -0800402{
Paul Mundt0dae8952009-05-12 06:18:09 +0900403 const char *dev_id = dev ? dev_name(dev) : NULL;
Paul Mundt36ddf312006-01-16 22:14:17 -0800404 struct clk *p, *clk = ERR_PTR(-ENOENT);
Paul Mundt1d118562006-12-01 13:15:14 +0900405 int idno;
406
Paul Mundt0dae8952009-05-12 06:18:09 +0900407 clk = clk_get_sys(dev_id, id);
Paul Mundtf3f82902009-05-12 16:07:40 +0900408 if (clk && !IS_ERR(clk))
Paul Mundt0dae8952009-05-12 06:18:09 +0900409 return clk;
410
Paul Mundt1d118562006-12-01 13:15:14 +0900411 if (dev == NULL || dev->bus != &platform_bus_type)
412 idno = -1;
413 else
414 idno = to_platform_device(dev)->id;
Paul Mundt36ddf312006-01-16 22:14:17 -0800415
Paul Mundt237b98f2006-09-27 17:28:20 +0900416 mutex_lock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800417 list_for_each_entry(p, &clock_list, node) {
Paul Mundt1d118562006-12-01 13:15:14 +0900418 if (p->id == idno &&
419 strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
420 clk = p;
421 goto found;
422 }
423 }
424
425 list_for_each_entry(p, &clock_list, node) {
Paul Mundt36ddf312006-01-16 22:14:17 -0800426 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
427 clk = p;
428 break;
429 }
430 }
Paul Mundt1d118562006-12-01 13:15:14 +0900431
432found:
Paul Mundt237b98f2006-09-27 17:28:20 +0900433 mutex_unlock(&clock_list_sem);
Paul Mundt36ddf312006-01-16 22:14:17 -0800434
435 return clk;
436}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900437EXPORT_SYMBOL_GPL(clk_get);
Paul Mundt36ddf312006-01-16 22:14:17 -0800438
/* Release a clock obtained with clk_get(); drops the module reference. */
void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
Paul Mundt36ddf312006-01-16 22:14:17 -0800445
/*
 * Weak default for CPU-family-specific clock registration; subtypes
 * override this to register their additional clocks.
 */
int __init __weak arch_clk_init(void)
{
	return 0;
}
450
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900451static int show_clocks(char *buf, char **start, off_t off,
452 int len, int *eof, void *data)
453{
454 struct clk *clk;
455 char *p = buf;
456
457 list_for_each_entry_reverse(clk, &clock_list, node) {
458 unsigned long rate = clk_get_rate(clk);
459
Magnus Damm152fe362008-07-17 19:05:54 +0900460 p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
461 rate / 1000000, (rate % 1000000) / 10000,
Paul Mundt4ff29ff2009-05-12 05:14:53 +0900462 (clk->usecount > 0) ? "enabled" : "disabled");
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900463 }
464
465 return p - buf;
466}
467
#ifdef CONFIG_PM
/*
 * sysdev suspend/resume hook.  The interesting case is PM_EVENT_ON
 * following PM_EVENT_FREEZE (resume from hibernation): hardware state
 * was lost, so each clock's parent and rate are re-programmed from the
 * cached software state.  prev_state remembers the last event so a
 * plain resume (no preceding freeze) is left alone.
 */
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				/* Prefer re-programming the saved rate;
				 * otherwise just refresh the cache. */
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}
504
/* Resume is modeled as a PM_EVENT_ON transition of the suspend hook. */
static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}
509
/* sysdev plumbing: exposes the clock framework as the "clks" class so
 * the PM core invokes the suspend/resume hooks above. */
static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};
522
/*
 * Register the clks sysdev class/driver/device at subsys initcall time.
 * NOTE(review): the sysdev_* registration return values are ignored;
 * a failure here would leave PM hooks silently unregistered — consider
 * checking and unwinding.
 */
static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif
533
/*
 * Framework bring-up: install ops on and register the four core CPG
 * clocks, let the CPU family register its extras, then recalculate
 * all rates and switch on the CLK_ENABLE_ON_INIT clocks.
 *
 * NOTE(review): errors are accumulated with |=, so the returned value
 * is only meaningful as zero/non-zero, not as a specific errno.
 */
int __init clk_init(void)
{
	int i, ret = 0;

	/* master_clk.rate comes from CONFIG_SH_PCLK_FREQ; a zero rate
	 * means the platform configuration is broken. */
	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		/* The array index identifies the clock to the arch code. */
		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks.. */
	recalculate_root_clocks();

	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

	return ret;
}
557
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900558static int __init clk_proc_init(void)
Paul Mundt36ddf312006-01-16 22:14:17 -0800559{
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900560 struct proc_dir_entry *p;
561 p = create_proc_read_entry("clocks", S_IRUSR, NULL,
562 show_clocks, NULL);
563 if (unlikely(!p))
564 return -EINVAL;
Paul Mundt36ddf312006-01-16 22:14:17 -0800565
566 return 0;
567}
Paul Mundtdb62e5b2007-04-26 12:17:20 +0900568subsys_initcall(clk_proc_init);