blob: 752e34787f215f3d5c23a975b32aeff4e4001de5 [file] [log] [blame]
Paul Walmsley543d9372008-03-18 10:22:06 +02001/*
2 * linux/arch/arm/mach-omap2/clock.c
3 *
Tony Lindgrena16e9702008-03-18 11:56:39 +02004 * Copyright (C) 2005-2008 Texas Instruments, Inc.
5 * Copyright (C) 2004-2008 Nokia Corporation
6 *
7 * Contacts:
Paul Walmsley543d9372008-03-18 10:22:06 +02008 * Richard Woodruff <r-woodruff2@ti.com>
Paul Walmsley543d9372008-03-18 10:22:06 +02009 * Paul Walmsley
10 *
Paul Walmsley543d9372008-03-18 10:22:06 +020011 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#undef DEBUG
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/device.h>
20#include <linux/list.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/clk.h>
Russell Kingfced80c2008-09-06 12:10:45 +010024#include <linux/io.h>
Russell Kingfbd3bdb2008-09-06 12:13:59 +010025#include <linux/bitops.h>
Paul Walmsley543d9372008-03-18 10:22:06 +020026
Russell Kinga09e64f2008-08-05 16:14:15 +010027#include <mach/clock.h>
Paul Walmsley333943b2008-08-19 11:08:45 +030028#include <mach/clockdomain.h>
Russell Kinga09e64f2008-08-05 16:14:15 +010029#include <mach/cpu.h>
Paul Walmsley543d9372008-03-18 10:22:06 +020030#include <asm/div64.h>
31
32#include "memory.h"
33#include "sdrc.h"
34#include "clock.h"
35#include "prm.h"
36#include "prm-regbits-24xx.h"
37#include "cm.h"
38#include "cm-regbits-24xx.h"
39#include "cm-regbits-34xx.h"
40
41#define MAX_CLOCK_ENABLE_WAIT 100000
42
Paul Walmsley88b8ba92008-07-03 12:24:46 +030043/* DPLL rate rounding: minimum DPLL multiplier, divider values */
44#define DPLL_MIN_MULTIPLIER 1
45#define DPLL_MIN_DIVIDER 1
46
47/* Possible error results from _dpll_test_mult */
Paul Walmsley85a5f782009-01-28 12:08:41 -070048#define DPLL_MULT_UNDERFLOW -1
Paul Walmsley88b8ba92008-07-03 12:24:46 +030049
50/*
51 * Scale factor to mitigate roundoff errors in DPLL rate rounding.
52 * The higher the scale factor, the greater the risk of arithmetic overflow,
53 * but the closer the rounded rate to the target rate. DPLL_SCALE_FACTOR
54 * must be a power of DPLL_SCALE_BASE.
55 */
56#define DPLL_SCALE_FACTOR 64
57#define DPLL_SCALE_BASE 2
58#define DPLL_ROUNDING_VAL ((DPLL_SCALE_BASE / 2) * \
59 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))
60
Paul Walmsley95f538a2009-01-28 12:08:44 -070061/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
62#define DPLL_FINT_BAND1_MIN 750000
63#define DPLL_FINT_BAND1_MAX 2100000
64#define DPLL_FINT_BAND2_MIN 7500000
65#define DPLL_FINT_BAND2_MAX 21000000
66
67/* _dpll_test_fint() return codes */
68#define DPLL_FINT_UNDERFLOW -1
69#define DPLL_FINT_INVALID -2
70
Paul Walmsley543d9372008-03-18 10:22:06 +020071u8 cpu_mask;
72
73/*-------------------------------------------------------------------------
Paul Walmsley333943b2008-08-19 11:08:45 +030074 * OMAP2/3 specific clock functions
Paul Walmsley543d9372008-03-18 10:22:06 +020075 *-------------------------------------------------------------------------*/
76
/*
 * _dpll_test_fint - test whether an Fint value is valid for the DPLL
 * @clk: DPLL struct clk to test
 * @n: divider value (N) to test
 *
 * Tests whether a particular divider @n will result in a valid DPLL
 * internal clock frequency Fint.  See the 34xx TRM 4.7.6.2 "DPLL Jitter
 * Correction".  Returns 0 if OK, -1 if the enclosing loop can terminate
 * (assuming that it is counting N upwards), or -2 if the enclosing loop
 * should skip to the next iteration (again assuming N is increasing).
 * May narrow dd->max_divider / dd->min_divider as a side effect so that
 * later searches skip known-bad N ranges.
 */
static int _dpll_test_fint(struct clk *clk, u8 n)
{
	struct dpll_data *dd;
	long fint;
	int ret = 0;

	dd = clk->dpll_data;

	/* DPLL divider must result in a valid jitter correction val */
	fint = clk->parent->rate / (n + 1);
	if (fint < DPLL_FINT_BAND1_MIN) {
		/* Below band 1: larger N only lowers Fint further */
		pr_debug("rejecting n=%d due to Fint failure, "
			 "lowering max_divider\n", n);
		dd->max_divider = n;
		ret = DPLL_FINT_UNDERFLOW;

	} else if (fint > DPLL_FINT_BAND1_MAX &&
		   fint < DPLL_FINT_BAND2_MIN) {
		/* In the dead zone between the two valid bands */
		pr_debug("rejecting n=%d due to Fint failure\n", n);
		ret = DPLL_FINT_INVALID;

	} else if (fint > DPLL_FINT_BAND2_MAX) {
		/* Above band 2: at least this much division is needed */
		pr_debug("rejecting n=%d due to Fint failure, "
			 "boosting min_divider\n", n);
		dd->min_divider = n;
		ret = DPLL_FINT_INVALID;

	}

	return ret;
}
122
Paul Walmsley543d9372008-03-18 10:22:06 +0200123/**
Paul Walmsley333943b2008-08-19 11:08:45 +0300124 * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
125 * @clk: OMAP clock struct ptr to use
126 *
127 * Convert a clockdomain name stored in a struct clk 'clk' into a
128 * clockdomain pointer, and save it into the struct clk. Intended to be
129 * called during clk_register(). No return value.
130 */
131void omap2_init_clk_clkdm(struct clk *clk)
132{
133 struct clockdomain *clkdm;
134
135 if (!clk->clkdm_name)
136 return;
137
138 clkdm = clkdm_lookup(clk->clkdm_name);
139 if (clkdm) {
140 pr_debug("clock: associated clk %s to clkdm %s\n",
141 clk->name, clk->clkdm_name);
142 clk->clkdm = clkdm;
143 } else {
144 pr_debug("clock: could not associate clk %s to "
145 "clkdm %s\n", clk->name, clk->clkdm_name);
146 }
147}
148
/**
 * omap2_init_clksel_parent - set a clksel clk's parent field from the hardware
 * @clk: OMAP clock struct ptr to use
 *
 * Given a pointer to a source-selectable struct clk, read the hardware
 * register and determine what its parent is currently set to.  Update the
 * clk->parent field with the appropriate clk ptr.  Logs an error if the
 * register contents match no known parent for this CPU.
 */
void omap2_init_clksel_parent(struct clk *clk)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 r, found = 0;

	if (!clk->clksel)
		return;

	/* Extract the parent-select field, shifted down to bit 0 */
	r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
	r >>= __ffs(clk->clksel_mask);

	/* Scan each candidate parent's rate table for a matching regval */
	for (clks = clk->clksel; clks->parent && !found; clks++) {
		for (clkr = clks->rates; clkr->div && !found; clkr++) {
			if ((clkr->flags & cpu_mask) && (clkr->val == r)) {
				if (clk->parent != clks->parent) {
					pr_debug("clock: inited %s parent "
						 "to %s (was %s)\n",
						 clk->name, clks->parent->name,
						 ((clk->parent) ?
						  clk->parent->name : "NULL"));
					clk->parent = clks->parent;
				};
				found = 1;
			}
		}
	}

	if (!found)
		printk(KERN_ERR "clock: init parent: could not find "
		       "regval %0x for clock %s\n", r, clk->name);

	return;
}
191
/*
 * omap2_get_dpll_rate - read the current DPLL rate from the hardware
 * @clk: DPLL struct clk to query
 *
 * Reads the M (multiplier) and N (divider) fields from the DPLL's
 * MULT_DIV1 register and computes parent_rate * M / (N + 1).
 * Returns the DPLL rate, or 0 if @clk has no dpll_data.
 */
u32 omap2_get_dpll_rate(struct clk *clk)
{
	long long dpll_clk;
	u32 dpll_mult, dpll_div, dpll;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	/* REVISIT: What do we return on error? */
	if (!dd)
		return 0;

	/* Extract M and N, each shifted down to bit 0 */
	dpll = __raw_readl(dd->mult_div1_reg);
	dpll_mult = dpll & dd->mult_mask;
	dpll_mult >>= __ffs(dd->mult_mask);
	dpll_div = dpll & dd->div1_mask;
	dpll_div >>= __ffs(dd->div1_mask);

	/* 64-bit math: parent * M can overflow 32 bits */
	dpll_clk = (long long)clk->parent->rate * dpll_mult;
	do_div(dpll_clk, dpll_div + 1);

	return dpll_clk;
}
215
/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor.  Recalculates clk->rate from the parent.
 */
void omap2_fixed_divisor_recalc(struct clk *clk)
{
	/* fixed_div == 0 would divide by zero below */
	WARN_ON(!clk->fixed_div);

	clk->rate = clk->parent->rate / clk->fixed_div;
}
226
227/**
228 * omap2_wait_clock_ready - wait for clock to enable
229 * @reg: physical address of clock IDLEST register
230 * @mask: value to mask against to determine if the clock is active
231 * @name: name of the clock (for printk)
232 *
233 * Returns 1 if the clock enabled in time, or 0 if it failed to enable
234 * in roughly MAX_CLOCK_ENABLE_WAIT microseconds.
235 */
236int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name)
237{
238 int i = 0;
239 int ena = 0;
240
241 /*
242 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
243 * 34xx reverses this, just to keep us on our toes
244 */
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700245 if (cpu_mask & (RATE_IN_242X | RATE_IN_243X))
Paul Walmsley543d9372008-03-18 10:22:06 +0200246 ena = mask;
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700247 else if (cpu_mask & RATE_IN_343X)
Paul Walmsley543d9372008-03-18 10:22:06 +0200248 ena = 0;
Paul Walmsley543d9372008-03-18 10:22:06 +0200249
250 /* Wait for lock */
251 while (((__raw_readl(reg) & mask) != ena) &&
252 (i++ < MAX_CLOCK_ENABLE_WAIT)) {
253 udelay(1);
254 }
255
256 if (i < MAX_CLOCK_ENABLE_WAIT)
257 pr_debug("Clock %s stable after %d loops\n", name, i);
258 else
259 printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
260 name, MAX_CLOCK_ENABLE_WAIT);
261
262
263 return (i < MAX_CLOCK_ENABLE_WAIT) ? 1 : 0;
264};
265
266
/*
 * Note: We don't need special code here for INVERT_ENABLE
 * for the time being since INVERT_ENABLE only applies to clocks enabled by
 * CM_CLKEN_PLL
 */
static void omap2_clk_wait_ready(struct clk *clk)
{
	void __iomem *reg, *other_reg, *st_reg;
	u32 bit;

	/*
	 * REVISIT: This code is pretty ugly.  It would be nice to generalize
	 * it and pull it into struct clk itself somehow.
	 */
	reg = clk->enable_reg;

	/*
	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
	 * it's just a matter of XORing the bits.
	 * NOTE(review): the (u32) casts make this 32-bit-only pointer
	 * arithmetic -- relies on the CM register address encoding.
	 */
	other_reg = (void __iomem *)((u32)reg ^ (CM_FCLKEN ^ CM_ICLKEN));

	/* Check if both functional and interface clocks
	 * are running. */
	bit = 1 << clk->enable_bit;
	if (!(__raw_readl(other_reg) & bit))
		return;
	st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */

	omap2_wait_clock_ready(st_reg, bit, clk->name);
}
298
Russell Kingbc51da42008-11-04 18:59:32 +0000299static int omap2_dflt_clk_enable(struct clk *clk)
Paul Walmsley543d9372008-03-18 10:22:06 +0200300{
301 u32 regval32;
302
Russell Kingc0fc18c2008-09-05 15:10:27 +0100303 if (unlikely(clk->enable_reg == NULL)) {
Paul Walmsley543d9372008-03-18 10:22:06 +0200304 printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
305 clk->name);
306 return 0; /* REVISIT: -EINVAL */
307 }
308
309 regval32 = __raw_readl(clk->enable_reg);
310 if (clk->flags & INVERT_ENABLE)
311 regval32 &= ~(1 << clk->enable_bit);
312 else
313 regval32 |= (1 << clk->enable_bit);
314 __raw_writel(regval32, clk->enable_reg);
315 wmb();
316
Paul Walmsley543d9372008-03-18 10:22:06 +0200317 return 0;
318}
319
/*
 * omap2_dflt_clk_enable_wait - enable a clock, then poll for readiness
 *
 * Default enable for clocks whose IDLEST bit must be polled after the
 * enable bit is set.  Returns the result of omap2_dflt_clk_enable().
 */
static int omap2_dflt_clk_enable_wait(struct clk *clk)
{
	int ret;

	/* Bail early so omap2_clk_wait_ready() never sees a NULL reg */
	if (!clk->enable_reg) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return 0; /* REVISIT: -EINVAL */
	}

	ret = omap2_dflt_clk_enable(clk);
	if (ret == 0)
		omap2_clk_wait_ready(clk);
	return ret;
}
335
/*
 * omap2_dflt_clk_disable - default clock disable: clear/set the enable bit
 *
 * Clears (or, for INVERT_ENABLE clocks, sets) the enable bit in the
 * clock's enable register.  No-op with an error message if the clock
 * has no enable register.
 */
static void omap2_dflt_clk_disable(struct clk *clk)
{
	u32 regval32;

	if (!clk->enable_reg) {
		/*
		 * 'Independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		printk(KERN_ERR "clock: clk_disable called on independent "
		       "clock %s which has no enable_reg\n", clk->name);
		return;
	}

	regval32 = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		regval32 |= (1 << clk->enable_bit);
	else
		regval32 &= ~(1 << clk->enable_bit);
	__raw_writel(regval32, clk->enable_reg);
	wmb();
}
358
/* Default ops for clocks that must be polled for readiness after enable */
const struct clkops clkops_omap2_dflt_wait = {
	.enable		= omap2_dflt_clk_enable_wait,
	.disable	= omap2_dflt_clk_disable,
};

/* Default ops for clocks that need no readiness polling */
const struct clkops clkops_omap2_dflt = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
};
368
/* Enables clock without considering parent dependencies or use count
 * REVISIT: Maybe change this to use clk->enable like on omap1?
 */
static int _omap2_clk_enable(struct clk *clk)
{
	return clk->ops->enable(clk);
}
376
/* Disables clock without considering parent dependencies or use count */
static void _omap2_clk_disable(struct clk *clk)
{
	clk->ops->disable(clk);
}
382
/*
 * omap2_clk_disable - usecounted disable
 *
 * Decrements the use count; on the 1 -> 0 transition, disables the
 * clock hardware, then recursively releases the parent and notifies
 * the clockdomain.
 */
void omap2_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		_omap2_clk_disable(clk);
		if (clk->parent)
			omap2_clk_disable(clk->parent);
		if (clk->clkdm)
			omap2_clkdm_clk_disable(clk->clkdm, clk);

	}
}
394
395int omap2_clk_enable(struct clk *clk)
396{
397 int ret = 0;
398
399 if (clk->usecount++ == 0) {
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700400 if (clk->parent)
Paul Walmsley543d9372008-03-18 10:22:06 +0200401 ret = omap2_clk_enable(clk->parent);
402
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700403 if (ret != 0) {
Paul Walmsley543d9372008-03-18 10:22:06 +0200404 clk->usecount--;
405 return ret;
406 }
407
Paul Walmsley333943b2008-08-19 11:08:45 +0300408 if (clk->clkdm)
409 omap2_clkdm_clk_enable(clk->clkdm, clk);
410
Paul Walmsley543d9372008-03-18 10:22:06 +0200411 ret = _omap2_clk_enable(clk);
412
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700413 if (ret != 0) {
Paul Walmsley333943b2008-08-19 11:08:45 +0300414 if (clk->clkdm)
415 omap2_clkdm_clk_disable(clk->clkdm, clk);
416
417 if (clk->parent) {
418 omap2_clk_disable(clk->parent);
419 clk->usecount--;
420 }
Paul Walmsley543d9372008-03-18 10:22:06 +0200421 }
422 }
423
424 return ret;
425}
426
427/*
428 * Used for clocks that are part of CLKSEL_xyz governed clocks.
429 * REVISIT: Maybe change to use clk->enable() functions like on omap1?
430 */
431void omap2_clksel_recalc(struct clk *clk)
432{
433 u32 div = 0;
434
435 pr_debug("clock: recalc'ing clksel clk %s\n", clk->name);
436
437 div = omap2_clksel_get_divisor(clk);
438 if (div == 0)
439 return;
440
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700441 if (clk->rate == (clk->parent->rate / div))
Paul Walmsley543d9372008-03-18 10:22:06 +0200442 return;
443 clk->rate = clk->parent->rate / div;
444
445 pr_debug("clock: new clock rate is %ld (div %d)\n", clk->rate, div);
Paul Walmsley543d9372008-03-18 10:22:06 +0200446}
447
/**
 * omap2_get_clksel_by_parent - return clksel struct for a given clk & parent
 * @clk: OMAP struct clk ptr to inspect
 * @src_clk: OMAP struct clk ptr of the parent clk to search for
 *
 * Scan the struct clksel array associated with the clock to find
 * the element associated with the supplied parent clock address.
 * Returns a pointer to the struct clksel on success or NULL on error.
 */
static const struct clksel *omap2_get_clksel_by_parent(struct clk *clk,
						       struct clk *src_clk)
{
	const struct clksel *clks;

	if (!clk->clksel)
		return NULL;

	/* The array is terminated by an entry with a NULL parent */
	for (clks = clk->clksel; clks->parent; clks++) {
		if (clks->parent == src_clk)
			break; /* Found the requested parent */
	}

	if (!clks->parent) {
		printk(KERN_ERR "clock: Could not find parent clock %s in "
		       "clksel array of clock %s\n", src_clk->name,
		       clk->name);
		return NULL;
	}

	return clks;
}
479
/**
 * omap2_clksel_round_rate_div - find divisor for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 * @new_div: ptr to where we should store the divisor
 *
 * Finds 'best' divider value in an array based on the source and target
 * rates.  The divider array must be sorted with smallest divider first.
 * Note that this will not work for clocks which are part of
 * CONFIG_PARTICIPANT, they are only settable as part of virtual_prcm set.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
				u32 *new_div)
{
	unsigned long test_rate;
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 last_div = 0;

	printk(KERN_INFO "clock: clksel_round_rate_div: %s target_rate %ld\n",
	       clk->name, target_rate);

	*new_div = 1;

	clks = omap2_get_clksel_by_parent(clk, clk->parent);
	if (!clks)
		return ~0;

	/* Dividers are sorted ascending: first rate <= target wins */
	for (clkr = clks->rates; clkr->div; clkr++) {
		if (!(clkr->flags & cpu_mask))
			continue;

		/* Sanity check */
		if (clkr->div <= last_div)
			printk(KERN_ERR "clock: clksel_rate table not sorted "
			       "for clock %s", clk->name);

		last_div = clkr->div;

		test_rate = clk->parent->rate / clkr->div;

		if (test_rate <= target_rate)
			break; /* found it */
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find divisor for target "
		       "rate %ld for clock %s parent %s\n", target_rate,
		       clk->name, clk->parent->name);
		return ~0;
	}

	*new_div = clkr->div;

	printk(KERN_INFO "clock: new_div = %d, new_rate = %ld\n", *new_div,
	       (clk->parent->rate / clkr->div));

	return (clk->parent->rate / clkr->div);
}
541
/**
 * omap2_clksel_round_rate - find rounded rate for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 *
 * Compatibility wrapper for the OMAP clock framework.
 * Finds best target rate based on the source clock and possible dividers.
 * The divider array must be sorted with smallest divider first.
 * Note that this will not work for clocks which are part of
 * CONFIG_PARTICIPANT, they are only settable as part of virtual_prcm set.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
{
	u32 new_div;

	/* The divisor itself is discarded here; only the rate is wanted */
	return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
}
561
562
563/* Given a clock and a rate apply a clock specific rounding function */
564long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
565{
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700566 if (clk->round_rate)
Paul Walmsley543d9372008-03-18 10:22:06 +0200567 return clk->round_rate(clk, rate);
568
569 if (clk->flags & RATE_FIXED)
570 printk(KERN_ERR "clock: generic omap2_clk_round_rate called "
571 "on fixed-rate clock %s\n", clk->name);
572
573 return clk->rate;
574}
575
/**
 * omap2_clksel_to_divisor() - turn clksel field value into integer divider
 * @clk: OMAP struct clk to use
 * @field_val: register field value to find
 *
 * Given a struct clk of a rate-selectable clksel clock, and a register field
 * value to search for, find the corresponding clock divisor.  The register
 * field value should be pre-masked and shifted down so the LSB is at bit 0
 * before calling.  Returns 0 on error
 */
u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;

	clks = omap2_get_clksel_by_parent(clk, clk->parent);
	if (!clks)
		return 0;

	/* Linear scan of the sentinel-terminated (div == 0) rate table */
	for (clkr = clks->rates; clkr->div; clkr++) {
		if ((clkr->flags & cpu_mask) && (clkr->val == field_val))
			break;
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find fieldval %d for "
		       "clock %s parent %s\n", field_val, clk->name,
		       clk->parent->name);
		return 0;
	}

	return clkr->div;
}
609
610/**
611 * omap2_divisor_to_clksel() - turn clksel integer divisor into a field value
612 * @clk: OMAP struct clk to use
613 * @div: integer divisor to search for
614 *
615 * Given a struct clk of a rate-selectable clksel clock, and a clock divisor,
616 * find the corresponding register field value. The return register value is
617 * the value before left-shifting. Returns 0xffffffff on error
618 */
619u32 omap2_divisor_to_clksel(struct clk *clk, u32 div)
620{
621 const struct clksel *clks;
622 const struct clksel_rate *clkr;
623
624 /* should never happen */
625 WARN_ON(div == 0);
626
627 clks = omap2_get_clksel_by_parent(clk, clk->parent);
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700628 if (!clks)
Paul Walmsley543d9372008-03-18 10:22:06 +0200629 return 0;
630
631 for (clkr = clks->rates; clkr->div; clkr++) {
632 if ((clkr->flags & cpu_mask) && (clkr->div == div))
633 break;
634 }
635
636 if (!clkr->div) {
637 printk(KERN_ERR "clock: Could not find divisor %d for "
638 "clock %s parent %s\n", div, clk->name,
639 clk->parent->name);
640 return 0;
641 }
642
643 return clkr->val;
644}
645
/**
 * omap2_get_clksel - find clksel register addr & field mask for a clk
 * @clk: struct clk to use
 * @field_mask: ptr to u32 to store the register field mask
 *
 * Returns the address of the clksel register upon success or NULL on error.
 */
static void __iomem *omap2_get_clksel(struct clk *clk, u32 *field_mask)
{
	/* Both a register and a non-zero mask are required */
	if (!clk->clksel_reg || (clk->clksel_mask == 0))
		return NULL;

	*field_mask = clk->clksel_mask;

	return clk->clksel_reg;
}
662
/**
 * omap2_clksel_get_divisor - get current divider applied to parent clock.
 * @clk: OMAP struct clk to use.
 *
 * Reads the clksel field from the hardware and converts it to the
 * integer divisor.  Returns the integer divisor upon success or 0 on
 * error.
 */
u32 omap2_clksel_get_divisor(struct clk *clk)
{
	u32 field_mask, field_val;
	void __iomem *div_addr;

	div_addr = omap2_get_clksel(clk, &field_mask);
	if (!div_addr)
		return 0;

	/* Mask and shift the divider field down to bit 0 */
	field_val = __raw_readl(div_addr) & field_mask;
	field_val >>= __ffs(field_mask);

	return omap2_clksel_to_divisor(clk, field_val);
}
683
/*
 * omap2_clksel_set_rate - program the clksel divider for a new rate
 * @clk: OMAP struct clk to use
 * @rate: target rate; must be exactly achievable by some divider
 *
 * Rounds @rate, programs the matching divider field, and updates
 * clk->rate.  Returns 0 on success or -EINVAL if @rate cannot be
 * reached exactly or the clock has no clksel register/field.
 */
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
{
	u32 field_mask, field_val, reg_val, validrate, new_div = 0;
	void __iomem *div_addr;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	div_addr = omap2_get_clksel(clk, &field_mask);
	if (!div_addr)
		return -EINVAL;

	field_val = omap2_divisor_to_clksel(clk, new_div);
	if (field_val == ~0)
		return -EINVAL;

	/* Read-modify-write only the divider field */
	reg_val = __raw_readl(div_addr);
	reg_val &= ~field_mask;
	reg_val |= (field_val << __ffs(field_mask));
	__raw_writel(reg_val, div_addr);
	wmb();

	clk->rate = clk->parent->rate / new_div;

	/* 24xx DELAYED_APP clocks only take effect once VALID_CONFIG is set */
	if (clk->flags & DELAYED_APP && cpu_is_omap24xx()) {
		prm_write_mod_reg(OMAP24XX_VALID_CONFIG,
			OMAP24XX_GR_MOD, OMAP24XX_PRCM_CLKCFG_CTRL_OFFSET);
		wmb();
	}

	return 0;
}
717
718
719/* Set the clock rate for a clock source */
720int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
721{
722 int ret = -EINVAL;
723
724 pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);
725
726 /* CONFIG_PARTICIPANT clocks are changed only in sets via the
727 rate table mechanism, driven by mpu_speed */
728 if (clk->flags & CONFIG_PARTICIPANT)
729 return -EINVAL;
730
731 /* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700732 if (clk->set_rate)
Paul Walmsley543d9372008-03-18 10:22:06 +0200733 ret = clk->set_rate(clk, rate);
734
Paul Walmsley543d9372008-03-18 10:22:06 +0200735 return ret;
736}
737
738/*
739 * Converts encoded control register address into a full address
740 * On error, *src_addr will be returned as 0.
741 */
742static u32 omap2_clksel_get_src_field(void __iomem **src_addr,
743 struct clk *src_clk, u32 *field_mask,
744 struct clk *clk, u32 *parent_div)
745{
746 const struct clksel *clks;
747 const struct clksel_rate *clkr;
748
749 *parent_div = 0;
Russell Kingc0fc18c2008-09-05 15:10:27 +0100750 *src_addr = NULL;
Paul Walmsley543d9372008-03-18 10:22:06 +0200751
752 clks = omap2_get_clksel_by_parent(clk, src_clk);
Paul Walmsleyfecb4942009-01-27 19:12:50 -0700753 if (!clks)
Paul Walmsley543d9372008-03-18 10:22:06 +0200754 return 0;
755
756 for (clkr = clks->rates; clkr->div; clkr++) {
757 if (clkr->flags & (cpu_mask | DEFAULT_RATE))
758 break; /* Found the default rate for this platform */
759 }
760
761 if (!clkr->div) {
762 printk(KERN_ERR "clock: Could not find default rate for "
763 "clock %s parent %s\n", clk->name,
764 src_clk->parent->name);
765 return 0;
766 }
767
768 /* Should never happen. Add a clksel mask to the struct clk. */
769 WARN_ON(clk->clksel_mask == 0);
770
771 *field_mask = clk->clksel_mask;
772 *src_addr = clk->clksel_reg;
773 *parent_div = clkr->div;
774
775 return clkr->val;
776}
777
/*
 * omap2_clk_set_parent - switch a clksel clock to a new parent
 * @clk: OMAP struct clk to reparent
 * @new_parent: struct clk of the new source
 *
 * Programs the clksel field to select @new_parent, briefly gating the
 * clock around the switch if it is in use, then updates clk->parent
 * and clk->rate.  Returns 0 on success or -EINVAL on error.
 */
int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
	void __iomem *src_addr;
	u32 field_val, field_mask, reg_val, parent_div;

	/* CONFIG_PARTICIPANT clocks are only changed via virt_prcm_set */
	if (clk->flags & CONFIG_PARTICIPANT)
		return -EINVAL;

	if (!clk->clksel)
		return -EINVAL;

	field_val = omap2_clksel_get_src_field(&src_addr, new_parent,
					       &field_mask, clk, &parent_div);
	if (!src_addr)
		return -EINVAL;

	/* Gate the clock while its input is being switched */
	if (clk->usecount > 0)
		_omap2_clk_disable(clk);

	/* Set new source value (previous dividers if any in effect) */
	reg_val = __raw_readl(src_addr) & ~field_mask;
	reg_val |= (field_val << __ffs(field_mask));
	__raw_writel(reg_val, src_addr);
	wmb();

	/* 24xx DELAYED_APP clocks only take effect once VALID_CONFIG is set */
	if (clk->flags & DELAYED_APP && cpu_is_omap24xx()) {
		__raw_writel(OMAP24XX_VALID_CONFIG, OMAP24XX_PRCM_CLKCFG_CTRL);
		wmb();
	}

	if (clk->usecount > 0)
		_omap2_clk_enable(clk);

	clk->parent = new_parent;

	/* CLKSEL clocks follow their parents' rates, divided by a divisor */
	clk->rate = new_parent->rate;

	if (parent_div > 0)
		clk->rate /= parent_div;

	pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
		 clk->name, clk->parent->name, clk->rate);

	return 0;
}
824
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300825/* DPLL rate rounding code */
826
827/**
828 * omap2_dpll_set_rate_tolerance: set the error tolerance during rate rounding
829 * @clk: struct clk * of the DPLL
830 * @tolerance: maximum rate error tolerance
831 *
832 * Set the maximum DPLL rate error tolerance for the rate rounding
833 * algorithm. The rate tolerance is an attempt to balance DPLL power
834 * saving (the least divider value "n") vs. rate fidelity (the least
835 * difference between the desired DPLL target rate and the rounded
836 * rate out of the algorithm). So, increasing the tolerance is likely
837 * to decrease DPLL power consumption and increase DPLL rate error.
838 * Returns -EINVAL if provided a null clock ptr or a clk that is not a
839 * DPLL; or 0 upon success.
840 */
841int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance)
842{
843 if (!clk || !clk->dpll_data)
844 return -EINVAL;
845
846 clk->dpll_data->rate_tolerance = tolerance;
847
848 return 0;
849}
850
/* Compute parent_rate * m / n using a 64-bit intermediate */
static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
					    unsigned int m, unsigned int n)
{
	unsigned long long num;

	num = (unsigned long long)parent_rate * m;
	/* do_div() divides num in place and leaves the quotient in num */
	do_div(num, n);
	return num;
}
860
/*
 * _dpll_test_mult - test a DPLL multiplier value
 * @m: pointer to the DPLL m (multiplier) value under test
 * @n: current DPLL n (divider) value under test
 * @new_rate: pointer to storage for the resulting rounded rate
 * @target_rate: the desired DPLL rate
 * @parent_rate: the DPLL's parent clock rate
 *
 * This code tests a DPLL multiplier value, ensuring that the
 * resulting rate will not be higher than the target_rate, and that
 * the multiplier value itself is valid for the DPLL.  Initially, the
 * integer pointed to by the m argument should be prescaled by
 * multiplying by DPLL_SCALE_FACTOR.  The code will replace this with
 * a non-scaled m upon return.  This non-scaled m will result in a
 * new_rate as close as possible to target_rate (but not greater than
 * target_rate) given the current (parent_rate, n, prescaled m)
 * triple.  Returns DPLL_MULT_UNDERFLOW in the event that the
 * non-scaled m attempted to underflow, which can allow the calling
 * function to bail out early; or 0 upon success.
 */
static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
			   unsigned long target_rate,
			   unsigned long parent_rate)
{
	int r = 0, carry = 0;

	/* Unscale m and round if necessary */
	if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
		carry = 1;
	*m = (*m / DPLL_SCALE_FACTOR) + carry;

	/*
	 * The new rate must be <= the target rate to avoid programming
	 * a rate that is impossible for the hardware to handle
	 */
	*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
	if (*new_rate > target_rate) {
		(*m)--;
		*new_rate = 0;	/* force a recompute below */
	}

	/* Guard against m underflow */
	if (*m < DPLL_MIN_MULTIPLIER) {
		*m = DPLL_MIN_MULTIPLIER;
		*new_rate = 0;
		r = DPLL_MULT_UNDERFLOW;
	}

	/* Recompute the rate if m was adjusted above */
	if (*new_rate == 0)
		*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);

	return r;
}
914
915/**
916 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
917 * @clk: struct clk * for a DPLL
918 * @target_rate: desired DPLL clock rate
919 *
920 * Given a DPLL, a desired target rate, and a rate tolerance, round
921 * the target rate to a possible, programmable rate for this DPLL.
922 * Rate tolerance is assumed to be set by the caller before this
923 * function is called. Attempts to select the minimum possible n
924 * within the tolerance to reduce power consumption. Stores the
925 * computed (m, n) in the DPLL's dpll_data structure so set_rate()
926 * will not need to call this (expensive) function again. Returns ~0
927 * if the target rate cannot be rounded, either because the rate is
928 * too low or because the rate tolerance is set too tightly; or the
929 * rounded rate upon success.
930 */
931long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
932{
933 int m, n, r, e, scaled_max_m;
934 unsigned long scaled_rt_rp, new_rate;
935 int min_e = -1, min_e_m = -1, min_e_n = -1;
Paul Walmsleyb3245042009-01-28 12:08:38 -0700936 struct dpll_data *dd;
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300937
938 if (!clk || !clk->dpll_data)
939 return ~0;
940
Paul Walmsleyb3245042009-01-28 12:08:38 -0700941 dd = clk->dpll_data;
942
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300943 pr_debug("clock: starting DPLL round_rate for clock %s, target rate "
944 "%ld\n", clk->name, target_rate);
945
946 scaled_rt_rp = target_rate / (clk->parent->rate / DPLL_SCALE_FACTOR);
Paul Walmsleyb3245042009-01-28 12:08:38 -0700947 scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300948
Paul Walmsleyb3245042009-01-28 12:08:38 -0700949 dd->last_rounded_rate = 0;
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300950
Paul Walmsley95f538a2009-01-28 12:08:44 -0700951 for (n = dd->min_divider; n <= dd->max_divider; n++) {
952
953 /* Is the (input clk, divider) pair valid for the DPLL? */
954 r = _dpll_test_fint(clk, n);
955 if (r == DPLL_FINT_UNDERFLOW)
956 break;
957 else if (r == DPLL_FINT_INVALID)
958 continue;
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300959
960 /* Compute the scaled DPLL multiplier, based on the divider */
961 m = scaled_rt_rp * n;
962
963 /*
Paul Walmsley85a5f782009-01-28 12:08:41 -0700964 * Since we're counting n up, a m overflow means we
965 * can bail out completely (since as n increases in
966 * the next iteration, there's no way that m can
967 * increase beyond the current m)
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300968 */
969 if (m > scaled_max_m)
Paul Walmsley85a5f782009-01-28 12:08:41 -0700970 break;
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300971
972 r = _dpll_test_mult(&m, n, &new_rate, target_rate,
973 clk->parent->rate);
974
Paul Walmsley85a5f782009-01-28 12:08:41 -0700975 /* m can't be set low enough for this n - try with a larger n */
976 if (r == DPLL_MULT_UNDERFLOW)
977 continue;
978
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300979 e = target_rate - new_rate;
980 pr_debug("clock: n = %d: m = %d: rate error is %d "
981 "(new_rate = %ld)\n", n, m, e, new_rate);
982
983 if (min_e == -1 ||
Paul Walmsleyb3245042009-01-28 12:08:38 -0700984 min_e >= (int)(abs(e) - dd->rate_tolerance)) {
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300985 min_e = e;
986 min_e_m = m;
987 min_e_n = n;
988
989 pr_debug("clock: found new least error %d\n", min_e);
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300990
Paul Walmsley85a5f782009-01-28 12:08:41 -0700991 /* We found good settings -- bail out now */
Paul Walmsley95f538a2009-01-28 12:08:44 -0700992 if (min_e <= dd->rate_tolerance)
Paul Walmsley85a5f782009-01-28 12:08:41 -0700993 break;
994 }
Paul Walmsley88b8ba92008-07-03 12:24:46 +0300995 }
996
997 if (min_e < 0) {
998 pr_debug("clock: error: target rate or tolerance too low\n");
999 return ~0;
1000 }
1001
Paul Walmsleyb3245042009-01-28 12:08:38 -07001002 dd->last_rounded_m = min_e_m;
1003 dd->last_rounded_n = min_e_n;
1004 dd->last_rounded_rate = _dpll_compute_new_rate(clk->parent->rate,
1005 min_e_m, min_e_n);
Paul Walmsley88b8ba92008-07-03 12:24:46 +03001006
1007 pr_debug("clock: final least error: e = %d, m = %d, n = %d\n",
1008 min_e, min_e_m, min_e_n);
1009 pr_debug("clock: final rate: %ld (target rate: %ld)\n",
Paul Walmsleyb3245042009-01-28 12:08:38 -07001010 dd->last_rounded_rate, target_rate);
Paul Walmsley88b8ba92008-07-03 12:24:46 +03001011
Paul Walmsleyb3245042009-01-28 12:08:38 -07001012 return dd->last_rounded_rate;
Paul Walmsley88b8ba92008-07-03 12:24:46 +03001013}
1014
Paul Walmsley543d9372008-03-18 10:22:06 +02001015/*-------------------------------------------------------------------------
1016 * Omap2 clock reset and init functions
1017 *-------------------------------------------------------------------------*/
1018
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable @clk if the hardware shows it running with no users.
 * Clocks flagged INVERT_ENABLE are "off" when their enable bit is
 * set, so the register state is compared against the flag-appropriate
 * idle value before anything is touched.
 */
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 mask, idle_val, cur;

	mask = 1 << clk->enable_bit;
	idle_val = (clk->flags & INVERT_ENABLE) ? mask : 0;

	cur = __raw_readl(clk->enable_reg);
	if ((cur & mask) == idle_val)
		return;		/* already in its disabled state */

	printk(KERN_INFO "Disabling unused clock \"%s\"\n", clk->name);
	_omap2_clk_disable(clk);
}
#endif