/*
 * OMAP3-specific clock framework functions
 *
 * Copyright (C) 2007-2008 Texas Instruments, Inc.
 * Copyright (C) 2007-2009 Nokia Corporation
 *
 * Written by Paul Walmsley
 * Testing and integration fixes by Jouni Högander
 *
 * Parts of this code are based on code written by
 * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/limits.h>
#include <linux/bitops.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/sram.h>
#include <plat/sdrc.h>
#include <asm/div64.h>
#include <asm/clkdev.h>

#include "clock.h"
#include "clock34xx.h"
#include "sdrc.h"
#include "prm.h"
#include "prm-regbits-34xx.h"
#include "cm.h"
#include "cm-regbits-34xx.h"

/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
#define DPLL_AUTOIDLE_DISABLE			0x0
#define DPLL_AUTOIDLE_LOW_POWER_STOP		0x1

#define MAX_DPLL_WAIT_TRIES		1000000

#define CYCLES_PER_MHZ			1000000

/*
 * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
 * that are sourced by DPLL5, and both of these require this clock
 * to be at 120 MHz for proper operation.
 */
#define DPLL5_FREQ_FOR_USBHOST		120000000

/* needed by omap3_core_dpll_m2_set_rate() */
struct clk *sdrc_ick_p, *arm_fck_p;
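
/*
 * Note: these pointers are expected to be filled in during clock
 * framework initialisation, before any CORE DPLL rate change, e.g.
 * via clk_get(NULL, "sdrc_ick") and clk_get(NULL, "arm_fck"), so that
 * omap3_core_dpll_m2_set_rate() can read their rates without a lookup
 * on every rate change.  The exact init path is an assumption here,
 * not something shown in this file.
 */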

/**
 * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
 * @clk: struct clk * being enabled
 * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
 * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
 *
 * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
 * from the CM_{I,F}CLKEN bit.  Pass back the correct info via
 * @idlest_reg and @idlest_bit.  No return value.
 */
static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
					    void __iomem **idlest_reg,
					    u8 *idlest_bit)
{
	u32 r;

	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
	*idlest_reg = (__force void __iomem *)r;
	*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
}
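
/*
 * Editorial note, inferred from the OMAP3 CM register layout rather
 * than stated in this file: CM_FCLKENx, CM_ICLKENx and CM_IDLESTx for
 * a given clock domain sit at offsets 0x00/0x08, 0x10/0x18 and
 * 0x20/0x28 respectively.  Clearing bits [7:4] of the enable register
 * address and ORing in 0x20, as done above and in the other
 * find_idlest variants below, therefore yields the matching CM_IDLEST
 * register address.
 */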

const struct clkops clkops_omap3430es2_ssi_wait = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.find_idlest	= omap3430es2_clk_ssi_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};

/**
 * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
 * @clk: struct clk * being enabled
 * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
 * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
 *
 * Some OMAP modules on OMAP3 ES2+ chips have both initiator and
 * target IDLEST bits.  For our purposes, we are concerned with the
 * target IDLEST bits, which exist at a different bit position than
 * the *CLKEN bit position for these modules (DSS and USBHOST).  (The
 * default find_idlest code assumes that they are at the same
 * position.)  No return value.
 */
static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
						    void __iomem **idlest_reg,
						    u8 *idlest_bit)
{
	u32 r;

	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
	*idlest_reg = (__force void __iomem *)r;
	/* USBHOST_IDLE has same shift */
	*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
}

const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};

/**
 * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
 * @clk: struct clk * being enabled
 * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
 * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
 *
 * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
 * shift from the CM_{I,F}CLKEN bit.  Pass back the correct info via
 * @idlest_reg and @idlest_bit.  No return value.
 */
static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
						 void __iomem **idlest_reg,
						 u8 *idlest_bit)
{
	u32 r;

	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
	*idlest_reg = (__force void __iomem *)r;
	*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
}

const struct clkops clkops_omap3430es2_hsotgusb_wait = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};

/**
 * omap3_dpll_recalc - recalculate DPLL rate
 * @clk: DPLL struct clk
 *
 * Recalculate and propagate the DPLL rate.
 */
unsigned long omap3_dpll_recalc(struct clk *clk)
{
	return omap2_get_dpll_rate(clk);
}

/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
static void _omap3_dpll_write_clken(struct clk *clk, u8 clken_bits)
{
	const struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = __raw_readl(dd->control_reg);
	v &= ~dd->enable_mask;
	v |= clken_bits << __ffs(dd->enable_mask);
	__raw_writel(v, dd->control_reg);
}

/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
static int _omap3_wait_dpll_status(struct clk *clk, u8 state)
{
	const struct dpll_data *dd;
	int i = 0;
	int ret = -EINVAL;

	dd = clk->dpll_data;

	state <<= __ffs(dd->idlest_mask);

	while (((__raw_readl(dd->idlest_reg) & dd->idlest_mask) != state) &&
	       i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		printk(KERN_ERR "clock: %s failed transition to '%s'\n",
		       clk->name, (state) ? "locked" : "bypassed");
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk->name, (state) ? "locked" : "bypassed", i);

		ret = 0;
	}

	return ret;
}
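
/*
 * Editorial note: with MAX_DPLL_WAIT_TRIES iterations of udelay(1),
 * the loop above gives up after roughly one second of busy-waiting,
 * far longer than a healthy DPLL should need to lock or enter bypass.
 */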

/* From 3430 TRM ES2 4.7.6.2 */
static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n)
{
	unsigned long fint;
	u16 f = 0;

	fint = clk->dpll_data->clk_ref->rate / n;

	pr_debug("clock: fint is %lu\n", fint);

	if (fint >= 750000 && fint <= 1000000)
		f = 0x3;
	else if (fint > 1000000 && fint <= 1250000)
		f = 0x4;
	else if (fint > 1250000 && fint <= 1500000)
		f = 0x5;
	else if (fint > 1500000 && fint <= 1750000)
		f = 0x6;
	else if (fint > 1750000 && fint <= 2100000)
		f = 0x7;
	else if (fint > 7500000 && fint <= 10000000)
		f = 0xB;
	else if (fint > 10000000 && fint <= 12500000)
		f = 0xC;
	else if (fint > 12500000 && fint <= 15000000)
		f = 0xD;
	else if (fint > 15000000 && fint <= 17500000)
		f = 0xE;
	else if (fint > 17500000 && fint <= 21000000)
		f = 0xF;
	else
		pr_debug("clock: unknown freqsel setting for %d\n", n);

	return f;
}
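
/*
 * Illustrative example (editorial, not part of the TRM table above):
 * with a 26 MHz reference clock and n = 13, fint = 26000000 / 13 =
 * 2 MHz, which falls in the 1.75..2.1 MHz band and selects FREQSEL
 * 0x7.  Internal rates between 2.1 MHz and 7.5 MHz have no FREQSEL
 * value in this table.
 */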

/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */

/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning.  Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code.  If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk *clk)
{
	u8 ai;
	int r;

	pr_debug("clock: locking DPLL %s\n", clk->name);

	ai = omap3_dpll_autoidle_read(clk);

	omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return r;
}

/*
 * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
 * bypass mode, the DPLL's rate is set equal to its parent clock's
 * rate.  Waits for the DPLL to report readiness before returning.
 * Will save and restore the DPLL's autoidle state across the enable,
 * per the CDP code.  If the DPLL entered bypass mode successfully,
 * return 0; if the DPLL did not enter bypass in the time allotted, or
 * DPLL3 was passed in, or the DPLL does not support low-power bypass,
 * return -EINVAL.
 */
static int _omap3_noncore_dpll_bypass(struct clk *clk)
{
	int r;
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return -EINVAL;

	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
		 clk->name);

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

	r = _omap3_wait_dpll_status(clk, 0);

	if (ai)
		omap3_dpll_allow_idle(clk);
	else
		omap3_dpll_deny_idle(clk);

	return r;
}

/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code.  If DPLL3 was passed in, or the DPLL does not support
 * low-power stop, return -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk *clk)
{
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
		return -EINVAL;

	pr_debug("clock: stopping DPLL %s\n", clk->name);

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

	if (ai)
		omap3_dpll_allow_idle(clk);
	else
		omap3_dpll_deny_idle(clk);

	return 0;
}

/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock.  This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state.  Intended to be used as the struct clk's
 * enable function.  If DPLL3 was passed in, or the DPLL does not
 * support low-power stop, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
static int omap3_noncore_dpll_enable(struct clk *clk)
{
	int r;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->rate == dd->clk_bypass->rate) {
		WARN_ON(clk->parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(clk->parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}
	/*
	 * FIXME: this is dubious - if clk->rate has changed, what about
	 * propagating?
	 */
	if (!r)
		clk->rate = omap2_get_dpll_rate(clk);

	return r;
}

/**
 * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  This function is
 * intended for use in struct clkops.  No return value.
 */
static void omap3_noncore_dpll_disable(struct clk *clk)
{
	_omap3_noncore_dpll_stop(clk);
}

const struct clkops clkops_noncore_dpll_ops = {
	.enable		= omap3_noncore_dpll_enable,
	.disable	= omap3_noncore_dpll_disable,
};

/* Non-CORE DPLL rate set code */

/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk: struct clk * of DPLL to set
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 * @freqsel: FREQSEL value to set
 *
 * Program the DPLL with the supplied M, N values, and wait for the
 * DPLL to lock.  Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u32 v;

	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
	_omap3_noncore_dpll_bypass(clk);

	/* Set jitter correction */
	v = __raw_readl(dd->control_reg);
	v &= ~dd->freqsel_mask;
	v |= freqsel << __ffs(dd->freqsel_mask);
	__raw_writel(v, dd->control_reg);

	/* Set DPLL multiplier, divider */
	v = __raw_readl(dd->mult_div1_reg);
	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= m << __ffs(dd->mult_mask);
	v |= (n - 1) << __ffs(dd->div1_mask);
	__raw_writel(v, dd->mult_div1_reg);

	/* We let the clock framework set the other output dividers later */

	/* REVISIT: Set ramp-up delay? */

	_omap3_noncore_dpll_lock(clk);

	return 0;
}
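
/*
 * For orientation (an editorial sketch based on the OMAP3 DPLL
 * description, not on anything stated in this file): a locked DPLL
 * runs at roughly Fdpll = Fref * M / (N + 1), and since the DIV1
 * field above is written as (n - 1), the caller's n is the effective
 * reference divider.  For example, with a 26 MHz reference, m = 332
 * and n = 13, the DPLL output is about 26 MHz * 332 / 13 = 664 MHz
 * before the Mx output dividers.
 */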

/**
 * omap3_noncore_dpll_set_rate - set non-core DPLL rate
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Set the DPLL CLKOUT to the target rate.  If the DPLL can enter
 * low-power bypass, and the target rate is the bypass source clock
 * rate, then configure the DPLL for bypass.  Otherwise, round the
 * target rate if it hasn't been done already, then program and lock
 * the DPLL.  Returns -EINVAL upon error, or 0 upon success.
 */
int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *new_parent = NULL;
	u16 freqsel;
	struct dpll_data *dd;
	int ret;

	if (!clk || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (rate == omap2_get_dpll_rate(clk))
		return 0;

	/*
	 * Ensure both the bypass and ref clocks are enabled prior to
	 * doing anything; we need the bypass clock running to reprogram
	 * the DPLL.
	 */
	omap2_clk_enable(dd->clk_bypass);
	omap2_clk_enable(dd->clk_ref);

	if (dd->clk_bypass->rate == rate &&
	    (clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		pr_debug("clock: %s: set rate: entering bypass.\n", clk->name);

		ret = _omap3_noncore_dpll_bypass(clk);
		if (!ret)
			new_parent = dd->clk_bypass;
	} else {
		if (dd->last_rounded_rate != rate)
			omap2_dpll_round_rate(clk, rate);

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		if (!freqsel)
			WARN_ON(1);

		pr_debug("clock: %s: set rate: locking rate to %lu.\n",
			 clk->name, rate);

		ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
						 dd->last_rounded_n, freqsel);
		if (!ret)
			new_parent = dd->clk_ref;
	}
	if (!ret) {
		/*
		 * Switch the parent clock in the hierarchy, and make sure
		 * that the new parent's usecount is correct.  Note: we
		 * enable the new parent before disabling the old to avoid
		 * any unnecessary hardware disable->enable transitions.
		 */
		if (clk->usecount) {
			omap2_clk_enable(new_parent);
			omap2_clk_disable(clk->parent);
		}
		clk_reparent(clk, new_parent);
		clk->rate = rate;
	}
	omap2_clk_disable(dd->clk_ref);
	omap2_clk_disable(dd->clk_bypass);

	return 0;
}

int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
{
	/*
	 * According to the 12-5 CDP code from TI, "Limitation 2.5"
	 * on 3430ES1 prevents us from changing DPLL multipliers or dividers
	 * on DPLL4.
	 */
	if (omap_rev() == OMAP3430_REV_ES1_0) {
		printk(KERN_ERR "clock: DPLL4 cannot change rate due to "
		       "silicon 'Limitation 2.5' on 3430ES1.\n");
		return -EINVAL;
	}
	return omap3_noncore_dpll_set_rate(clk, rate);
}
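
/*
 * Usage sketch (editorial; the clock name and rate come from the OMAP3
 * clock data and are assumed here, not defined in this file): these
 * set_rate methods are normally reached through the generic clock API
 * rather than being called directly, e.g.
 *
 *	struct clk *dpll4_ck = clk_get(NULL, "dpll4_ck");
 *
 *	if (!IS_ERR(dpll4_ck))
 *		clk_set_rate(dpll4_ck, 864000000);
 *
 * which dispatches through omap2_clk_set_rate() and the clock's
 * .set_rate hook to omap3_dpll4_set_rate().
 */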

/*
 * CORE DPLL (DPLL3) rate programming functions
 *
 * These call into SRAM code to do the actual CM writes, since the SDRAM
 * is clocked from DPLL3.
 */

/**
 * omap3_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Program the DPLL M2 divider with the rounded target rate.  Returns
 * -EINVAL upon error, or 0 upon success.
 */
int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	u32 new_div = 0;
	u32 unlock_dll = 0;
	u32 c;
	unsigned long validrate, sdrcrate, _mpurate;
	struct omap_sdrc_params *sdrc_cs0;
	struct omap_sdrc_params *sdrc_cs1;
	int ret;

	if (!clk || !rate)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	sdrcrate = sdrc_ick_p->rate;
	if (rate > clk->rate)
		sdrcrate <<= ((rate / clk->rate) >> 1);
	else
		sdrcrate >>= ((clk->rate / rate) >> 1);

	ret = omap2_sdrc_get_params(sdrcrate, &sdrc_cs0, &sdrc_cs1);
	if (ret)
		return -EINVAL;

	if (sdrcrate < MIN_SDRC_DLL_LOCK_FREQ) {
		pr_debug("clock: will unlock SDRC DLL\n");
		unlock_dll = 1;
	}

	/*
	 * XXX This only needs to be done when the CPU frequency changes
	 */
	_mpurate = arm_fck_p->rate / CYCLES_PER_MHZ;
	c = (_mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;
	c += 1; /* for safety */
	c *= SDRC_MPURATE_LOOPS;
	c >>= SDRC_MPURATE_SCALE;
	if (c == 0)
		c = 1;
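
	/*
	 * Editorial note: 'c' approximates the number of MPU loop
	 * iterations corresponding to the SDRC stabilisation delay, so
	 * that the SRAM code can busy-wait for a roughly constant wall
	 * clock time regardless of the current MPU rate.  This is an
	 * interpretation of the computation above, not a statement
	 * taken from the TRM.
	 */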

	pr_debug("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate,
		 validrate);
	pr_debug("clock: SDRC CS0 timing params used:"
		 " RFR %08x CTRLA %08x CTRLB %08x MR %08x\n",
		 sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
		 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr);
	if (sdrc_cs1)
		pr_debug("clock: SDRC CS1 timing params used:"
			 " RFR %08x CTRLA %08x CTRLB %08x MR %08x\n",
			 sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
			 sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);

	if (sdrc_cs1)
		omap3_configure_core_dpll(
				new_div, unlock_dll, c, rate > clk->rate,
				sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
				sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
				sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
				sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);
	else
		omap3_configure_core_dpll(
				new_div, unlock_dll, c, rate > clk->rate,
				sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
				sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
				0, 0, 0, 0);

	return 0;
}

/* DPLL autoidle read/set code */

/**
 * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
 * @clk: struct clk * of the DPLL to read
 *
 * Return the DPLL's autoidle bits, shifted down to bit 0.  Returns
 * -EINVAL if passed a null pointer or if the struct clk does not
 * appear to refer to a DPLL.
 */
u32 omap3_dpll_autoidle_read(struct clk *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	v = __raw_readl(dd->autoidle_reg);
	v &= dd->autoidle_mask;
	v >>= __ffs(dd->autoidle_mask);

	return v;
}

/**
 * omap3_dpll_allow_idle - enable DPLL autoidle bits
 * @clk: struct clk * of the DPLL to operate on
 *
 * Enable DPLL automatic idle control.  This automatic idle mode
 * switching takes effect only when the DPLL is locked, at least on
 * OMAP3430.  The DPLL will enter low-power stop when its downstream
 * clocks are gated.  No return value.
 */
void omap3_dpll_allow_idle(struct clk *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	/*
	 * REVISIT: CORE DPLL can optionally enter low-power bypass
	 * by writing 0x5 instead of 0x1.  Add some mechanism to
	 * optionally enter this mode.
	 */
	v = __raw_readl(dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
	__raw_writel(v, dd->autoidle_reg);
}

/**
 * omap3_dpll_deny_idle - prevent DPLL from automatically idling
 * @clk: struct clk * of the DPLL to operate on
 *
 * Disable DPLL automatic idle control.  No return value.
 */
void omap3_dpll_deny_idle(struct clk *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	v = __raw_readl(dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
	__raw_writel(v, dd->autoidle_reg);
}

/* Clock control for DPLL outputs */

/**
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @clk: DPLL output struct clk
 *
 * Using parent clock DPLL data, look up DPLL state.  If locked, set our
 * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
 */
unsigned long omap3_clkoutx2_recalc(struct clk *clk)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk *pclk;

	/* Walk up the parents of clk, looking for a DPLL */
	pclk = clk->parent;
	while (pclk && !pclk->dpll_data)
		pclk = pclk->parent;

	/* clk does not have a DPLL as a parent? */
	WARN_ON(!pclk);

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	v = __raw_readl(dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if (v != OMAP3XXX_EN_DPLL_LOCKED)
		rate = clk->parent->rate;
	else
		rate = clk->parent->rate * 2;
	return rate;
}

/* Common clock code */

/*
 * As it is structured now, this will prevent an OMAP2/3 multiboot
 * kernel from compiling.  This will need further attention.
 */
#if defined(CONFIG_ARCH_OMAP3)

struct clk_functions omap2_clk_functions = {
	.clk_enable		= omap2_clk_enable,
	.clk_disable		= omap2_clk_disable,
	.clk_round_rate		= omap2_clk_round_rate,
	.clk_set_rate		= omap2_clk_set_rate,
	.clk_set_parent		= omap2_clk_set_parent,
	.clk_disable_unused	= omap2_clk_disable_unused,
};

/*
 * Set clocks for bypass mode for reboot to work.
 */
void omap2_clk_prepare_for_reboot(void)
{
	/* REVISIT: Not ready for 343x */
#if 0
	u32 rate;

	if (vclk == NULL || sclk == NULL)
		return;

	rate = clk_get_rate(sclk);
	clk_set_rate(vclk, rate);
#endif
}

void omap3_clk_lock_dpll5(void)
{
	struct clk *dpll5_clk;
	struct clk *dpll5_m2_clk;

	dpll5_clk = clk_get(NULL, "dpll5_ck");
	clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
	clk_enable(dpll5_clk);

	/* Enable autoidle to allow it to enter low power bypass */
	omap3_dpll_allow_idle(dpll5_clk);

	/* Program dpll5_m2_clk divider for no division */
	dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
	clk_enable(dpll5_m2_clk);
	clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);

	clk_disable(dpll5_m2_clk);
	clk_disable(dpll5_clk);
	return;
}

/* REVISIT: Move this init stuff out into clock.c */

/*
 * Switch the MPU rate if specified on cmdline.
 * We cannot do this early until cmdline is parsed.
 */
static int __init omap2_clk_arch_init(void)
{
	struct clk *osc_sys_ck, *dpll1_ck, *arm_fck, *core_ck;
	unsigned long osc_sys_rate;

	if (!mpurate)
		return -EINVAL;

	/* XXX test these for success */
	dpll1_ck = clk_get(NULL, "dpll1_ck");
	arm_fck = clk_get(NULL, "arm_fck");
	core_ck = clk_get(NULL, "core_ck");
	osc_sys_ck = clk_get(NULL, "osc_sys_ck");

	/* REVISIT: not yet ready for 343x */
	if (clk_set_rate(dpll1_ck, mpurate))
		printk(KERN_ERR "*** Unable to set MPU rate\n");

	recalculate_root_clocks();

	osc_sys_rate = clk_get_rate(osc_sys_ck);

	pr_info("Switched to new clocking rate (Crystal/Core/MPU): "
		"%ld.%01ld/%ld/%ld MHz\n",
		(osc_sys_rate / 1000000),
		((osc_sys_rate / 100000) % 10),
		(clk_get_rate(core_ck) / 1000000),
		(clk_get_rate(arm_fck) / 1000000));

	calibrate_delay();

	return 0;
}
arch_initcall(omap2_clk_arch_init);

#endif