/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/remote_spinlock.h>

#include <mach/scm-io.h>
#include <mach/msm_iomap.h>

#include "clock.h"
#include "clock-pll.h"
#include "smd_private.h"

#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

#define PLL_OUTCTRL	BIT(0)
#define PLL_BYPASSNL	BIT(1)
#define PLL_RESET_N	BIT(2)
#define PLL_MODE_MASK	BM(3, 0)

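/*
 * Each PLL register can be addressed in one of two ways: if the clock's
 * 'base' pointer is set, the *_reg field is treated as an offset from the
 * runtime-mapped base; otherwise it is used directly as the register
 * address. The PLL_*_REG() macros below hide that distinction.
 */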
#define PLL_EN_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
				((x)->en_reg))
#define PLL_STATUS_REG(x)	((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
				((x)->status_reg))
#define PLL_MODE_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
				((x)->mode_reg))
#define PLL_L_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
				((x)->l_reg))
#define PLL_M_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
				((x)->m_reg))
#define PLL_N_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
				((x)->n_reg))
#define PLL_CONFIG_REG(x)	((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
				((x)->config_reg))

static DEFINE_SPINLOCK(pll_reg_lock);

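/* Poll up to 200 times, 1 us apart, for a voted PLL to report that it is on. */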
#define ENABLE_WAIT_MAX_LOOPS 200

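/*
 * Voteable PLLs: enabling sets this clock's vote bits (en_mask) in the
 * enable register and then polls the status register until the PLL reports
 * that it is running, or the wait times out.
 */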
int pll_vote_clk_enable(struct clk *clk)
{
	u32 ena, count;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pll));
	ena |= pll->en_mask;
	writel_relaxed(ena, PLL_EN_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	/*
	 * Use a memory barrier since some PLL status registers are
	 * not within the same 1K segment as the voting registers.
	 */
	mb();

	/* Wait for pll to enable. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask)
			return 0;
		udelay(1);
	}

	WARN(1, "PLL %s didn't enable after voting for it!\n", clk->dbg_name);

	return -ETIMEDOUT;
}

void pll_vote_clk_disable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pll));
	ena &= ~(pll->en_mask);
	writel_relaxed(ena, PLL_EN_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

struct clk *pll_vote_clk_get_parent(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return pll->parent;
}

int pll_vote_clk_is_enabled(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return !!(readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask);
}

struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.auto_off = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_parent = pll_vote_clk_get_parent,
};

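/*
 * Bring a locally-controlled PLL up by sequencing its MODE register:
 * take it out of bypass, release reset after the required settling time,
 * wait for lock, and finally gate the output on.
 */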
static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}

static int local_pll_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_enable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

static void __pll_clk_disable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	mode &= ~PLL_MODE_MASK;
	writel_relaxed(mode, mode_reg);
}

static void local_pll_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_disable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

static struct clk *local_pll_clk_get_parent(struct clk *clk)
{
	struct pll_clk *pll = to_pll_clk(clk);
	return pll->parent;
}

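/*
 * SR PLLs use a slightly different bring-up order from
 * __pll_clk_enable_reg(): reset is de-asserted before the bypass is
 * disabled, and the lock wait is longer (60us).
 */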
int sr_pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

struct clk_ops clk_ops_local_pll = {
	.enable = local_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.auto_off = local_pll_clk_disable,
	.get_parent = local_pll_clk_get_parent,
};

struct pll_rate {
	unsigned int lvalue;
	unsigned long rate;
};

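/*
 * Lookup table used at handoff to translate the L value already programmed
 * in a PLL (e.g. by the bootloader) into its output rate.
 */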
static struct pll_rate pll_l_rate[] = {
	{10, 196000000},
	{12, 245760000},
	{30, 589820000},
	{38, 737280000},
	{41, 800000000},
	{50, 960000000},
	{52, 1008000000},
	{62, 1200000000},
	{63, 1209600000},
	{0, 0},
};

#define PLL_BASE	7

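/*
 * Layout of the PLL control structure shared through SMEM
 * (SMEM_CLKREGIM_SOURCES) with the other processors in the SoC. Access to
 * it is serialized with the 'pll_lock' remote spinlock defined below.
 */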
struct shared_pll_control {
	uint32_t	version;
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t	on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t	votes;
	} pll[PLL_BASE + PLL_END];
};

static remote_spinlock_t pll_lock;
static struct shared_pll_control *pll_control;

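/*
 * Map the shared PLL control structure out of SMEM and sanity-check its
 * size and version. A mismatch means this kernel and the remote processors
 * disagree on the layout, so it is treated as fatal.
 */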
void __init msm_shared_pll_control_init(void)
{
#define PLL_REMOTE_SPINLOCK_ID "S:7"
	unsigned smem_size;

	remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);

	pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
	if (!pll_control) {
		pr_err("Can't find shared PLL control data structure!\n");
		BUG();
	/*
	 * There might be more PLLs than what the application processor knows
	 * about. But the index used for each PLL is guaranteed to remain the
	 * same.
	 */
	} else if (smem_size < sizeof(struct shared_pll_control)) {
		pr_err("Shared PLL control data structure too small!\n");
		BUG();
	} else if (pll_control->version != 0xCCEE0001) {
		pr_err("Shared PLL control version mismatch!\n");
		BUG();
	} else {
		pr_info("Shared PLL control available.\n");
		return;
	}

}

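/*
 * Enable/disable paths for PLLs shared across processors: each path takes
 * the remote spinlock, updates the application processor's vote (bit 1) in
 * the SMEM structure, and only touches the hardware when the PLL's on/off
 * state actually needs to change.
 */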
static int pll_clk_enable(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
	if (!pll_control->pll[PLL_BASE + pll_id].on) {
		__pll_clk_enable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 1;
	}

	remote_spin_unlock(&pll_lock);
	return 0;
}

static void pll_clk_disable(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
	if (pll_control->pll[PLL_BASE + pll_id].on
	    && !pll_control->pll[PLL_BASE + pll_id].votes) {
		__pll_clk_disable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 0;
	}

	remote_spin_unlock(&pll_lock);
}

static int pll_clk_is_enabled(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);

	return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
}

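/*
 * Handoff: the PLL has already been configured elsewhere (e.g. by the
 * bootloader or another processor), so wait for a non-zero L value to
 * appear in the register following MODE and map it to a rate via
 * pll_l_rate[]. An unknown L value is treated as fatal.
 */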
static enum handoff pll_clk_handoff(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
	unsigned int pll_lval;
	struct pll_rate *l;

	/*
	 * Wait for the PLLs to be initialized and then read their frequency.
	 */
	do {
		pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
		cpu_relax();
		udelay(50);
	} while (pll_lval == 0);

	/* Convert PLL L values to PLL Output rate */
	for (l = pll_l_rate; l->rate != 0; l++) {
		if (l->lvalue == pll_lval) {
			clk->rate = l->rate;
			break;
		}
	}

	if (!clk->rate) {
		pr_crit("Unknown PLL L value!\n");
		BUG();
	}

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_pll_ops = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.handoff = pll_clk_handoff,
	.is_enabled = pll_clk_is_enabled,
};

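/*
 * Put a PLL's mode register into FSM (hardware voting) mode: release the
 * FSM reset, program the bias and lock counts, then enable FSM voting.
 */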
static void __init __set_fsm_mode(void __iomem *mode_reg)
{
	u32 regval = readl_relaxed(mode_reg);

	/* De-assert reset to FSM */
	regval &= ~BIT(21);
	writel_relaxed(regval, mode_reg);

	/* Program bias count */
	regval &= ~BM(19, 14);
	regval |= BVAL(19, 14, 0x1);
	writel_relaxed(regval, mode_reg);

	/* Program lock count */
	regval &= ~BM(13, 8);
	regval |= BVAL(13, 8, 0x8);
	writel_relaxed(regval, mode_reg);

	/* Enable PLL FSM voting */
	regval |= BIT(20);
	writel_relaxed(regval, mode_reg);
}

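/*
 * One-time configuration of a PLL: program the L/M/N divider values, then
 * update the CONFIG register (MN accumulator, main output, pre/post
 * dividers, VCO selection) and optionally switch the PLL into FSM mode.
 */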
void __init configure_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	u32 regval;

	writel_relaxed(config->l, PLL_L_REG(regs));
	writel_relaxed(config->m, PLL_M_REG(regs));
	writel_relaxed(config->n, PLL_N_REG(regs));

	regval = readl_relaxed(PLL_CONFIG_REG(regs));

	/* Enable the MN accumulator */
	if (config->mn_ena_mask) {
		regval &= ~config->mn_ena_mask;
		regval |= config->mn_ena_val;
	}

	/* Enable the main output */
	if (config->main_output_mask) {
		regval &= ~config->main_output_mask;
		regval |= config->main_output_val;
	}

	/* Set pre-divider and post-divider values */
	regval &= ~config->pre_div_mask;
	regval |= config->pre_div_val;
	regval &= ~config->post_div_mask;
	regval |= config->post_div_val;

	/* Select VCO setting */
	regval &= ~config->vco_mask;
	regval |= config->vco_val;
	writel_relaxed(regval, PLL_CONFIG_REG(regs));

	/* Configure in FSM mode if necessary */
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs));
}