/*
 * linux/arch/arm/mach-omap2/clock.c
 *
 *  Copyright (C) 2005-2008 Texas Instruments, Inc.
 *  Copyright (C) 2004-2008 Nokia Corporation
 *
 *  Contacts:
 *  Richard Woodruff <r-woodruff2@ti.com>
 *  Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <mach/clock.h>
#include <mach/clockdomain.h>
#include <mach/cpu.h>
#include <asm/div64.h>

#include <mach/sdrc.h>
#include "sdrc.h"
#include "clock.h"
#include "prm.h"
#include "prm-regbits-24xx.h"
#include "cm.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"

#define MAX_CLOCK_ENABLE_WAIT		100000

/* DPLL rate rounding: minimum DPLL multiplier, divider values */
#define DPLL_MIN_MULTIPLIER		1
#define DPLL_MIN_DIVIDER		1

/* Possible error results from _dpll_test_mult */
#define DPLL_MULT_UNDERFLOW		-1

/*
 * Scale factor to mitigate roundoff errors in DPLL rate rounding.
 * The higher the scale factor, the greater the risk of arithmetic overflow,
 * but the closer the rounded rate to the target rate.  DPLL_SCALE_FACTOR
 * must be a power of DPLL_SCALE_BASE.
 */
#define DPLL_SCALE_FACTOR		64
#define DPLL_SCALE_BASE			2
#define DPLL_ROUNDING_VAL		((DPLL_SCALE_BASE / 2) * \
					 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))
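/*
 * With the values above, DPLL_ROUNDING_VAL works out to 32: when a scaled
 * multiplier is unscaled in _dpll_test_mult(), a remainder of
 * DPLL_SCALE_FACTOR/2 or more rounds the unscaled m up by one.
 */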

/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
#define DPLL_FINT_BAND1_MIN		750000
#define DPLL_FINT_BAND1_MAX		2100000
#define DPLL_FINT_BAND2_MIN		7500000
#define DPLL_FINT_BAND2_MAX		21000000
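/*
 * In other words, Fint must fall within 0.75..2.1 MHz or 7.5..21 MHz;
 * _dpll_test_fint() rejects divider values that land outside both bands.
 */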

/* _dpll_test_fint() return codes */
#define DPLL_FINT_UNDERFLOW		-1
#define DPLL_FINT_INVALID		-2

u8 cpu_mask;

/*-------------------------------------------------------------------------
 * OMAP2/3 specific clock functions
 *-------------------------------------------------------------------------*/

/**
 * _omap2xxx_clk_commit - commit clock parent/rate changes in hardware
 * @clk: struct clk *
 *
 * If @clk has the DELAYED_APP flag set, meaning that parent/rate changes
 * don't take effect until the VALID_CONFIG bit is written, write the
 * VALID_CONFIG bit and wait for the write to complete.  No return value.
 */
static void _omap2xxx_clk_commit(struct clk *clk)
{
	if (!cpu_is_omap24xx())
		return;

	if (!(clk->flags & DELAYED_APP))
		return;

	prm_write_mod_reg(OMAP24XX_VALID_CONFIG, OMAP24XX_GR_MOD,
			  OMAP24XX_PRCM_CLKCFG_CTRL_OFFSET);
	/* OCP barrier */
	prm_read_mod_reg(OMAP24XX_GR_MOD, OMAP24XX_PRCM_CLKCFG_CTRL_OFFSET);
}

/*
 * _dpll_test_fint - test whether an Fint value is valid for the DPLL
 * @clk: DPLL struct clk to test
 * @n: divider value (N) to test
 *
 * Tests whether a particular divider @n will result in a valid DPLL
 * internal clock frequency Fint.  See the 34xx TRM 4.7.6.2 "DPLL Jitter
 * Correction".  Returns 0 if OK, -1 if the enclosing loop can terminate
 * (assuming that it is counting N upwards), or -2 if the enclosing loop
 * should skip to the next iteration (again assuming N is increasing).
 */
static int _dpll_test_fint(struct clk *clk, u8 n)
{
	struct dpll_data *dd;
	long fint;
	int ret = 0;

	dd = clk->dpll_data;

	/* DPLL divider must result in a valid jitter correction val */
	fint = clk->parent->rate / (n + 1);
	if (fint < DPLL_FINT_BAND1_MIN) {

		pr_debug("rejecting n=%d due to Fint failure, "
			 "lowering max_divider\n", n);
		dd->max_divider = n;
		ret = DPLL_FINT_UNDERFLOW;

	} else if (fint > DPLL_FINT_BAND1_MAX &&
		   fint < DPLL_FINT_BAND2_MIN) {

		pr_debug("rejecting n=%d due to Fint failure\n", n);
		ret = DPLL_FINT_INVALID;

	} else if (fint > DPLL_FINT_BAND2_MAX) {

		pr_debug("rejecting n=%d due to Fint failure, "
			 "boosting min_divider\n", n);
		dd->min_divider = n;
		ret = DPLL_FINT_INVALID;

	}

	return ret;
}

/**
 * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
 * @clk: OMAP clock struct ptr to use
 *
 * Convert a clockdomain name stored in a struct clk 'clk' into a
 * clockdomain pointer, and save it into the struct clk.  Intended to be
 * called during clk_register().  No return value.
 */
void omap2_init_clk_clkdm(struct clk *clk)
{
	struct clockdomain *clkdm;

	if (!clk->clkdm_name)
		return;

	clkdm = clkdm_lookup(clk->clkdm_name);
	if (clkdm) {
		pr_debug("clock: associated clk %s to clkdm %s\n",
			 clk->name, clk->clkdm_name);
		clk->clkdm = clkdm;
	} else {
		pr_debug("clock: could not associate clk %s to "
			 "clkdm %s\n", clk->name, clk->clkdm_name);
	}
}

/**
 * omap2_init_clksel_parent - set a clksel clk's parent field from the hardware
 * @clk: OMAP clock struct ptr to use
 *
 * Given a pointer to a source-selectable struct clk, read the hardware
 * register and determine what its parent is currently set to.  Update the
 * clk->parent field with the appropriate clk ptr.
 */
void omap2_init_clksel_parent(struct clk *clk)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 r, found = 0;

	if (!clk->clksel)
		return;

	r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
	r >>= __ffs(clk->clksel_mask);

	for (clks = clk->clksel; clks->parent && !found; clks++) {
		for (clkr = clks->rates; clkr->div && !found; clkr++) {
			if ((clkr->flags & cpu_mask) && (clkr->val == r)) {
				if (clk->parent != clks->parent) {
					pr_debug("clock: inited %s parent "
						 "to %s (was %s)\n",
						 clk->name, clks->parent->name,
						 ((clk->parent) ?
						  clk->parent->name : "NULL"));
					clk_reparent(clk, clks->parent);
				}
				found = 1;
			}
		}
	}

	if (!found)
		printk(KERN_ERR "clock: init parent: could not find "
		       "regval %0x for clock %s\n", r, clk->name);

	return;
}

/* Returns the DPLL rate */
u32 omap2_get_dpll_rate(struct clk *clk)
{
	long long dpll_clk;
	u32 dpll_mult, dpll_div, dpll;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	/* REVISIT: What do we return on error? */
	if (!dd)
		return 0;

	dpll = __raw_readl(dd->mult_div1_reg);
	dpll_mult = dpll & dd->mult_mask;
	dpll_mult >>= __ffs(dd->mult_mask);
	dpll_div = dpll & dd->div1_mask;
	dpll_div >>= __ffs(dd->div1_mask);

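	/*
	 * Fout = Fref * M / (N + 1): the multiplier and divider fields were
	 * read from the DPLL's mult_div1 register above.  Use 64-bit math
	 * so a large Fref * M product cannot overflow.
	 */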
	dpll_clk = (long long)clk->parent->rate * dpll_mult;
	do_div(dpll_clk, dpll_div + 1);

	return dpll_clk;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
void omap2_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	clk->rate = clk->parent->rate / clk->fixed_div;
}

/**
 * omap2_wait_clock_ready - wait for clock to enable
 * @reg: physical address of clock IDLEST register
 * @mask: value to mask against to determine if the clock is active
 * @name: name of the clock (for printk)
 *
 * Returns 1 if the clock enabled in time, or 0 if it failed to enable
 * in roughly MAX_CLOCK_ENABLE_WAIT microseconds.
 */
int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name)
{
	int i = 0;
	int ena = 0;

	/*
	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
	 * 34xx reverses this, just to keep us on our toes
	 */
	if (cpu_mask & (RATE_IN_242X | RATE_IN_243X))
		ena = mask;
	else if (cpu_mask & RATE_IN_343X)
		ena = 0;

	/* Wait for lock */
	while (((__raw_readl(reg) & mask) != ena) &&
	       (i++ < MAX_CLOCK_ENABLE_WAIT)) {
		udelay(1);
	}

	if (i < MAX_CLOCK_ENABLE_WAIT)
		pr_debug("Clock %s stable after %d loops\n", name, i);
	else
		printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
		       name, MAX_CLOCK_ENABLE_WAIT);


	return (i < MAX_CLOCK_ENABLE_WAIT) ? 1 : 0;
}


/*
 * Note: We don't need special code here for INVERT_ENABLE
 * for the time being since INVERT_ENABLE only applies to clocks enabled by
 * CM_CLKEN_PLL
 */
static void omap2_clk_wait_ready(struct clk *clk)
{
	void __iomem *reg, *other_reg, *st_reg;
	u32 bit;

	/*
	 * REVISIT: This code is pretty ugly.  It would be nice to generalize
	 * it and pull it into struct clk itself somehow.
	 */
	reg = clk->enable_reg;

	/*
	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
	 * it's just a matter of XORing the bits.
	 */
	other_reg = (void __iomem *)((u32)reg ^ (CM_FCLKEN ^ CM_ICLKEN));

	/* Check if both functional and interface clocks
	 * are running. */
	bit = 1 << clk->enable_bit;
	if (!(__raw_readl(other_reg) & bit))
		return;
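	/*
	 * The corresponding CM_IDLEST* register is assumed to live at
	 * offset 0x20 within the same CM register block as the enable
	 * registers, hence the address arithmetic below.
	 */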
	st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */

	omap2_wait_clock_ready(st_reg, bit, clk->name);
}

static int omap2_dflt_clk_enable(struct clk *clk)
{
	u32 v;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return 0; /* REVISIT: -EINVAL */
	}

	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	v = __raw_readl(clk->enable_reg); /* OCP barrier */

	return 0;
}

static int omap2_dflt_clk_enable_wait(struct clk *clk)
{
	int ret;

	if (!clk->enable_reg) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return 0; /* REVISIT: -EINVAL */
	}

	ret = omap2_dflt_clk_enable(clk);
	if (ret == 0)
		omap2_clk_wait_ready(clk);
	return ret;
}

static void omap2_dflt_clk_disable(struct clk *clk)
{
	u32 v;

	if (!clk->enable_reg) {
		/*
		 * 'Independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		printk(KERN_ERR "clock: clk_disable called on independent "
		       "clock %s which has no enable_reg\n", clk->name);
		return;
	}

	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */
}

const struct clkops clkops_omap2_dflt_wait = {
	.enable		= omap2_dflt_clk_enable_wait,
	.disable	= omap2_dflt_clk_disable,
};

const struct clkops clkops_omap2_dflt = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
};

/* Enables clock without considering parent dependencies or use count
 * REVISIT: Maybe change this to use clk->enable like on omap1?
 */
static int _omap2_clk_enable(struct clk *clk)
{
	return clk->ops->enable(clk);
}

/* Disables clock without considering parent dependencies or use count */
static void _omap2_clk_disable(struct clk *clk)
{
	clk->ops->disable(clk);
}

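/*
 * omap2_clk_disable: drop one reference to @clk.  When the use count
 * reaches zero, the clock is gated, the reference this clock holds on
 * its parent is dropped, and the enclosing clockdomain is told that one
 * of its clocks has been disabled.
 */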
void omap2_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		_omap2_clk_disable(clk);
		if (clk->parent)
			omap2_clk_disable(clk->parent);
		if (clk->clkdm)
			omap2_clkdm_clk_disable(clk->clkdm, clk);

	}
}

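/*
 * omap2_clk_enable: take one reference to @clk.  On the 0 -> 1
 * transition, the enclosing clockdomain and the parent clock are
 * enabled before the clock itself; on failure, any partial enables are
 * undone and the use count is rolled back.
 */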
int omap2_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->clkdm)
			omap2_clkdm_clk_enable(clk->clkdm, clk);

		if (clk->parent) {
			ret = omap2_clk_enable(clk->parent);
			if (ret)
				goto err;
		}

		ret = _omap2_clk_enable(clk);
		if (ret) {
			if (clk->parent)
				omap2_clk_disable(clk->parent);

			goto err;
		}
	}
	return ret;

err:
	if (clk->clkdm)
		omap2_clkdm_clk_disable(clk->clkdm, clk);
	clk->usecount--;
	return ret;
}

/*
 * Used for clocks that are part of CLKSEL_xyz governed clocks.
 * REVISIT: Maybe change to use clk->enable() functions like on omap1?
 */
void omap2_clksel_recalc(struct clk *clk)
{
	u32 div = 0;

	pr_debug("clock: recalc'ing clksel clk %s\n", clk->name);

	div = omap2_clksel_get_divisor(clk);
	if (div == 0)
		return;

	if (clk->rate == (clk->parent->rate / div))
		return;
	clk->rate = clk->parent->rate / div;

	pr_debug("clock: new clock rate is %ld (div %d)\n", clk->rate, div);
}

/**
 * omap2_get_clksel_by_parent - return clksel struct for a given clk & parent
 * @clk: OMAP struct clk ptr to inspect
 * @src_clk: OMAP struct clk ptr of the parent clk to search for
 *
 * Scan the struct clksel array associated with the clock to find
 * the element associated with the supplied parent clock address.
 * Returns a pointer to the struct clksel on success or NULL on error.
 */
static const struct clksel *omap2_get_clksel_by_parent(struct clk *clk,
						       struct clk *src_clk)
{
	const struct clksel *clks;

	if (!clk->clksel)
		return NULL;

	for (clks = clk->clksel; clks->parent; clks++) {
		if (clks->parent == src_clk)
			break; /* Found the requested parent */
	}

	if (!clks->parent) {
		printk(KERN_ERR "clock: Could not find parent clock %s in "
		       "clksel array of clock %s\n", src_clk->name,
		       clk->name);
		return NULL;
	}

	return clks;
}

/**
 * omap2_clksel_round_rate_div - find divisor for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 * @new_div: ptr to where we should store the divisor
 *
 * Finds 'best' divider value in an array based on the source and target
 * rates.  The divider array must be sorted with smallest divider first.
 * Note that this will not work for clocks which are part of
 * CONFIG_PARTICIPANT; they are only settable as part of the virtual_prcm set.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
				u32 *new_div)
{
	unsigned long test_rate;
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 last_div = 0;

	printk(KERN_INFO "clock: clksel_round_rate_div: %s target_rate %ld\n",
	       clk->name, target_rate);

	*new_div = 1;

	clks = omap2_get_clksel_by_parent(clk, clk->parent);
	if (!clks)
		return ~0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if (!(clkr->flags & cpu_mask))
			continue;

		/* Sanity check */
		if (clkr->div <= last_div)
			printk(KERN_ERR "clock: clksel_rate table not sorted "
			       "for clock %s", clk->name);

		last_div = clkr->div;

		test_rate = clk->parent->rate / clkr->div;

		if (test_rate <= target_rate)
			break; /* found it */
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find divisor for target "
		       "rate %ld for clock %s parent %s\n", target_rate,
		       clk->name, clk->parent->name);
		return ~0;
	}

	*new_div = clkr->div;

	printk(KERN_INFO "clock: new_div = %d, new_rate = %ld\n", *new_div,
	       (clk->parent->rate / clkr->div));

	return (clk->parent->rate / clkr->div);
}

/**
 * omap2_clksel_round_rate - find rounded rate for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 *
 * Compatibility wrapper for OMAP clock framework
 * Finds the best target rate based on the source clock and possible
 * dividers.  The divider array must be sorted with smallest divider first.
 * Note that this will not work for clocks which are part of
 * CONFIG_PARTICIPANT; they are only settable as part of the virtual_prcm set.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
{
	u32 new_div;

	return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
}


/* Given a clock and a rate apply a clock specific rounding function */
long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate)
		return clk->round_rate(clk, rate);

	if (clk->flags & RATE_FIXED)
		printk(KERN_ERR "clock: generic omap2_clk_round_rate called "
		       "on fixed-rate clock %s\n", clk->name);

	return clk->rate;
}

/**
 * omap2_clksel_to_divisor() - turn clksel field value into integer divider
 * @clk: OMAP struct clk to use
 * @field_val: register field value to find
 *
 * Given a struct clk of a rate-selectable clksel clock, and a register field
 * value to search for, find the corresponding clock divisor.  The register
 * field value should be pre-masked and shifted down so the LSB is at bit 0
 * before calling.  Returns 0 on error
 */
u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;

	clks = omap2_get_clksel_by_parent(clk, clk->parent);
	if (!clks)
		return 0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if ((clkr->flags & cpu_mask) && (clkr->val == field_val))
			break;
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find fieldval %d for "
		       "clock %s parent %s\n", field_val, clk->name,
		       clk->parent->name);
		return 0;
	}

	return clkr->div;
}

/**
 * omap2_divisor_to_clksel() - turn clksel integer divisor into a field value
 * @clk: OMAP struct clk to use
 * @div: integer divisor to search for
 *
 * Given a struct clk of a rate-selectable clksel clock, and a clock divisor,
 * find the corresponding register field value.  The return register value is
 * the value before left-shifting.  Returns 0xffffffff on error
 */
u32 omap2_divisor_to_clksel(struct clk *clk, u32 div)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;

	/* should never happen */
	WARN_ON(div == 0);

	clks = omap2_get_clksel_by_parent(clk, clk->parent);
	if (!clks)
		return 0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if ((clkr->flags & cpu_mask) && (clkr->div == div))
			break;
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find divisor %d for "
		       "clock %s parent %s\n", div, clk->name,
		       clk->parent->name);
		return 0;
	}

	return clkr->val;
}

/**
 * omap2_clksel_get_divisor - get current divider applied to parent clock.
 * @clk: OMAP struct clk to use.
 *
 * Returns the integer divisor upon success or 0 on error.
 */
u32 omap2_clksel_get_divisor(struct clk *clk)
{
	u32 v;

	if (!clk->clksel_mask)
		return 0;

	v = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
	v >>= __ffs(clk->clksel_mask);

	return omap2_clksel_to_divisor(clk, v);
}

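/*
 * omap2_clksel_set_rate: program a clksel-divided clock to @rate.  The
 * rate must round exactly to one of the divisors available for the
 * current parent; the new divider field is then written to the clksel
 * register (with a readback to flush the write) and committed via
 * _omap2xxx_clk_commit(), which is a no-op except for 24xx DELAYED_APP
 * clocks.
 */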
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
{
	u32 v, field_val, validrate, new_div = 0;

	if (!clk->clksel_mask)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	field_val = omap2_divisor_to_clksel(clk, new_div);
	if (field_val == ~0)
		return -EINVAL;

	v = __raw_readl(clk->clksel_reg);
	v &= ~clk->clksel_mask;
	v |= field_val << __ffs(clk->clksel_mask);
	__raw_writel(v, clk->clksel_reg);
	v = __raw_readl(clk->clksel_reg); /* OCP barrier */

	clk->rate = clk->parent->rate / new_div;

	_omap2xxx_clk_commit(clk);

	return 0;
}


/* Set the clock rate for a clock source */
int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);

	/* CONFIG_PARTICIPANT clocks are changed only in sets via the
	   rate table mechanism, driven by mpu_speed */
	if (clk->flags & CONFIG_PARTICIPANT)
		return -EINVAL;

	/* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	return ret;
}

/*
 * Look up the clksel register field value needed to select @src_clk as
 * the parent of @clk, store it in @field_val, and return the parent
 * divider that goes with it.  On error, the return value (parent_div)
 * will be 0.
 */
static u32 _omap2_clksel_get_src_field(struct clk *src_clk, struct clk *clk,
				       u32 *field_val)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;

	clks = omap2_get_clksel_by_parent(clk, src_clk);
	if (!clks)
		return 0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if (clkr->flags & (cpu_mask | DEFAULT_RATE))
			break; /* Found the default rate for this platform */
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find default rate for "
		       "clock %s parent %s\n", clk->name,
		       src_clk->parent->name);
		return 0;
	}

	/* Should never happen.  Add a clksel mask to the struct clk. */
	WARN_ON(clk->clksel_mask == 0);

	*field_val = clkr->val;

	return clkr->div;
}

int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
	u32 field_val, v, parent_div;

	if (clk->flags & CONFIG_PARTICIPANT)
		return -EINVAL;

	if (!clk->clksel)
		return -EINVAL;

	parent_div = _omap2_clksel_get_src_field(new_parent, clk, &field_val);
	if (!parent_div)
		return -EINVAL;

	if (clk->usecount > 0)
		_omap2_clk_disable(clk);

	/* Set new source value (previous dividers if any in effect) */
	v = __raw_readl(clk->clksel_reg);
	v &= ~clk->clksel_mask;
	v |= field_val << __ffs(clk->clksel_mask);
	__raw_writel(v, clk->clksel_reg);
	v = __raw_readl(clk->clksel_reg); /* OCP barrier */

	_omap2xxx_clk_commit(clk);

	if (clk->usecount > 0)
		_omap2_clk_enable(clk);

	clk_reparent(clk, new_parent);

	/* CLKSEL clocks follow their parents' rates, divided by a divisor */
	clk->rate = new_parent->rate;

	if (parent_div > 0)
		clk->rate /= parent_div;

	pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
		 clk->name, clk->parent->name, clk->rate);

	return 0;
}

/* DPLL rate rounding code */

/**
 * omap2_dpll_set_rate_tolerance: set the error tolerance during rate rounding
 * @clk: struct clk * of the DPLL
 * @tolerance: maximum rate error tolerance
 *
 * Set the maximum DPLL rate error tolerance for the rate rounding
 * algorithm.  The rate tolerance is an attempt to balance DPLL power
 * saving (the least divider value "n") vs. rate fidelity (the least
 * difference between the desired DPLL target rate and the rounded
 * rate out of the algorithm).  So, increasing the tolerance is likely
 * to decrease DPLL power consumption and increase DPLL rate error.
 * Returns -EINVAL if provided a null clock ptr or a clk that is not a
 * DPLL; or 0 upon success.
 */
int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance)
{
	if (!clk || !clk->dpll_data)
		return -EINVAL;

	clk->dpll_data->rate_tolerance = tolerance;

	return 0;
}

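/*
 * Compute parent_rate * m / n using a 64-bit intermediate so the
 * multiplication cannot overflow an unsigned long.
 */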
static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
					    unsigned int m, unsigned int n)
{
	unsigned long long num;

	num = (unsigned long long)parent_rate * m;
	do_div(num, n);
	return num;
}

/*
 * _dpll_test_mult - test a DPLL multiplier value
 * @m: pointer to the DPLL m (multiplier) value under test
 * @n: current DPLL n (divider) value under test
 * @new_rate: pointer to storage for the resulting rounded rate
 * @target_rate: the desired DPLL rate
 * @parent_rate: the DPLL's parent clock rate
 *
 * This code tests a DPLL multiplier value, ensuring that the
 * resulting rate will not be higher than the target_rate, and that
 * the multiplier value itself is valid for the DPLL.  Initially, the
 * integer pointed to by the m argument should be prescaled by
 * multiplying by DPLL_SCALE_FACTOR.  The code will replace this with
 * a non-scaled m upon return.  This non-scaled m will result in a
 * new_rate as close as possible to target_rate (but not greater than
 * target_rate) given the current (parent_rate, n, prescaled m)
 * triple.  Returns DPLL_MULT_UNDERFLOW in the event that the
 * non-scaled m attempted to underflow, which can allow the calling
 * function to bail out early; or 0 upon success.
 */
static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
			   unsigned long target_rate,
			   unsigned long parent_rate)
{
	int r = 0, carry = 0;

	/* Unscale m and round if necessary */
	if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
		carry = 1;
	*m = (*m / DPLL_SCALE_FACTOR) + carry;

	/*
	 * The new rate must be <= the target rate to avoid programming
	 * a rate that is impossible for the hardware to handle
	 */
	*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
	if (*new_rate > target_rate) {
		(*m)--;
		*new_rate = 0;
	}

	/* Guard against m underflow */
	if (*m < DPLL_MIN_MULTIPLIER) {
		*m = DPLL_MIN_MULTIPLIER;
		*new_rate = 0;
		r = DPLL_MULT_UNDERFLOW;
	}

	if (*new_rate == 0)
		*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);

	return r;
}

/**
 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
 * @clk: struct clk * for a DPLL
 * @target_rate: desired DPLL clock rate
 *
 * Given a DPLL, a desired target rate, and a rate tolerance, round
 * the target rate to a possible, programmable rate for this DPLL.
 * Rate tolerance is assumed to be set by the caller before this
 * function is called.  Attempts to select the minimum possible n
 * within the tolerance to reduce power consumption.  Stores the
 * computed (m, n) in the DPLL's dpll_data structure so set_rate()
 * will not need to call this (expensive) function again.  Returns ~0
 * if the target rate cannot be rounded, either because the rate is
 * too low or because the rate tolerance is set too tightly; or the
 * rounded rate upon success.
 */
long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
{
	int m, n, r, e, scaled_max_m;
	unsigned long scaled_rt_rp, new_rate;
	int min_e = -1, min_e_m = -1, min_e_n = -1;
	struct dpll_data *dd;

	if (!clk || !clk->dpll_data)
		return ~0;

	dd = clk->dpll_data;

	pr_debug("clock: starting DPLL round_rate for clock %s, target rate "
		 "%ld\n", clk->name, target_rate);

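	/*
	 * scaled_rt_rp is the target/parent rate ratio prescaled by
	 * DPLL_SCALE_FACTOR, so "scaled_rt_rp * n" in the loop below gives
	 * the scaled multiplier candidate for each divider n; scaled_max_m
	 * is the matching prescaled multiplier limit.
	 */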
	scaled_rt_rp = target_rate / (clk->parent->rate / DPLL_SCALE_FACTOR);
	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;

	dd->last_rounded_rate = 0;

	for (n = dd->min_divider; n <= dd->max_divider; n++) {

		/* Is the (input clk, divider) pair valid for the DPLL? */
		r = _dpll_test_fint(clk, n);
		if (r == DPLL_FINT_UNDERFLOW)
			break;
		else if (r == DPLL_FINT_INVALID)
			continue;

		/* Compute the scaled DPLL multiplier, based on the divider */
		m = scaled_rt_rp * n;

		/*
		 * Since we're counting n up, an m overflow means we
		 * can bail out completely: as n increases on later
		 * iterations, m can only grow beyond the current m.
		 */
		if (m > scaled_max_m)
			break;

		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
				    clk->parent->rate);

		/* m can't be set low enough for this n - try with a larger n */
		if (r == DPLL_MULT_UNDERFLOW)
			continue;

		e = target_rate - new_rate;
		pr_debug("clock: n = %d: m = %d: rate error is %d "
			 "(new_rate = %ld)\n", n, m, e, new_rate);

		if (min_e == -1 ||
		    min_e >= (int)(abs(e) - dd->rate_tolerance)) {
			min_e = e;
			min_e_m = m;
			min_e_n = n;

			pr_debug("clock: found new least error %d\n", min_e);

			/* We found good settings -- bail out now */
			if (min_e <= dd->rate_tolerance)
				break;
		}
	}

	if (min_e < 0) {
		pr_debug("clock: error: target rate or tolerance too low\n");
		return ~0;
	}

	dd->last_rounded_m = min_e_m;
	dd->last_rounded_n = min_e_n;
	dd->last_rounded_rate = _dpll_compute_new_rate(clk->parent->rate,
						       min_e_m, min_e_n);

	pr_debug("clock: final least error: e = %d, m = %d, n = %d\n",
		 min_e, min_e_m, min_e_n);
	pr_debug("clock: final rate: %ld (target rate: %ld)\n",
		 dd->last_rounded_rate, target_rate);

	return dd->last_rounded_rate;
}

/*-------------------------------------------------------------------------
 * Omap2 clock reset and init functions
 *-------------------------------------------------------------------------*/

#ifdef CONFIG_OMAP_RESET_CLOCKS
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 regval32, v;

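	/* v is the register value that means "disabled" for this clock */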
	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"\n", clk->name);
	if (cpu_is_omap34xx()) {
		omap2_clk_enable(clk);
		omap2_clk_disable(clk);
	} else
		_omap2_clk_disable(clk);
}
#endif