/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/remote_spinlock.h>

#include <mach/scm-io.h>
#include <mach/msm_iomap.h>

#include "clock.h"
#include "clock-pll.h"
#include "smd_private.h"
26
Vikram Mulukutla681d8682012-03-09 23:56:20 -080027#ifdef CONFIG_MSM_SECURE_IO
28#undef readl_relaxed
29#undef writel_relaxed
30#define readl_relaxed secure_readl
31#define writel_relaxed secure_writel
32#endif
33
34#define PLL_OUTCTRL BIT(0)
35#define PLL_BYPASSNL BIT(1)
36#define PLL_RESET_N BIT(2)
37#define PLL_MODE_MASK BM(3, 0)
38
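/*
 * Register accessors: when a PLL supplies a remapped base pointer, the
 * en/status/mode fields are treated as offsets from that base; otherwise
 * they are used directly as already-mapped register addresses.
 */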
#define PLL_EN_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
			((x)->en_reg))
#define PLL_STATUS_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
			((x)->status_reg))
#define PLL_MODE_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
			((x)->mode_reg))

static DEFINE_SPINLOCK(pll_reg_lock);

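/*
 * Voteable PLLs are shared with other masters. Enabling sets this
 * processor's vote bit in the enable register and then polls the status
 * register until the PLL reports that it is running; disabling simply
 * clears the vote.
 */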
int pll_vote_clk_enable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pll));
	ena |= pll->en_mask;
	writel_relaxed(ena, PLL_EN_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	/* Wait until PLL is enabled */
	while ((readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask) == 0)
		cpu_relax();

	return 0;
}

void pll_vote_clk_disable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pll));
	ena &= ~(pll->en_mask);
	writel_relaxed(ena, PLL_EN_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

struct clk *pll_vote_clk_get_parent(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return pll->parent;
}

int pll_vote_clk_is_enabled(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return !!(readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask);
}

struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.auto_off = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_parent = pll_vote_clk_get_parent,
};

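/*
 * Raw enable sequence shared by the locally-controlled and SMEM-voted
 * PLLs: take the PLL out of bypass, release its reset, wait for it to
 * lock, then enable the output.
 */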
static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}

static int local_pll_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_enable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

static void __pll_clk_disable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	mode &= ~PLL_MODE_MASK;
	writel_relaxed(mode, mode_reg);
}

static void local_pll_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_disable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

static struct clk *local_pll_clk_get_parent(struct clk *clk)
{
	struct pll_clk *pll = to_pll_clk(clk);
	return pll->parent;
}

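/*
 * Enable sequence for SR-type PLLs. Unlike __pll_clk_enable_reg(), the
 * reset is released before the bypass is disabled, and the lock wait is
 * 60us.
 */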
int sr_pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

struct clk_ops clk_ops_local_pll = {
	.enable = local_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.auto_off = local_pll_clk_disable,
	.get_parent = local_pll_clk_get_parent,
};

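/*
 * Table mapping a PLL's programmed L value (read back during handoff) to
 * its output rate, used to recover the rate of a PLL that was configured
 * before the kernel took over.
 */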
struct pll_rate {
	unsigned int lvalue;
	unsigned long rate;
};

static struct pll_rate pll_l_rate[] = {
	{10, 196000000},
	{12, 245760000},
	{30, 589820000},
	{38, 737280000},
	{41, 800000000},
	{50, 960000000},
	{52, 1008000000},
	{62, 1200000000},
	{63, 1209600000},
	{0, 0},
};

#define PLL_BASE 7

struct shared_pll_control {
	uint32_t version;
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t votes;
	} pll[PLL_BASE + PLL_END];
};

static remote_spinlock_t pll_lock;
static struct shared_pll_control *pll_control;

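/*
 * Locate the PLL control structure shared over SMEM with the other
 * processors and sanity-check its size and version before using it.
 */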
void __init msm_shared_pll_control_init(void)
{
#define PLL_REMOTE_SPINLOCK_ID "S:7"
	unsigned smem_size;

	remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);

	pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
	if (!pll_control) {
		pr_err("Can't find shared PLL control data structure!\n");
		BUG();
	/*
	 * There might be more PLLs than what the application processor knows
	 * about. But the index used for each PLL is guaranteed to remain the
	 * same.
	 */
	} else if (smem_size < sizeof(struct shared_pll_control)) {
		pr_err("Shared PLL control data structure too small!\n");
		BUG();
	} else if (pll_control->version != 0xCCEE0001) {
		pr_err("Shared PLL control version mismatch!\n");
		BUG();
	} else {
		pr_info("Shared PLL control available.\n");
		return;
	}
}

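/*
 * Enable/disable for PLLs shared through SMEM: take the remote spinlock,
 * update this processor's vote (bit 1), and only touch the PLL hardware
 * when the overall on/off state actually needs to change.
 */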
static int pll_clk_enable(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
	if (!pll_control->pll[PLL_BASE + pll_id].on) {
		__pll_clk_enable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 1;
	}

	remote_spin_unlock(&pll_lock);
	return 0;
}

static void pll_clk_disable(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
	if (pll_control->pll[PLL_BASE + pll_id].on
	    && !pll_control->pll[PLL_BASE + pll_id].votes) {
		__pll_clk_disable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 0;
	}

	remote_spin_unlock(&pll_lock);
}

static int pll_clk_is_enabled(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);

	return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
}

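/*
 * At handoff, spin until the PLL's L value has been programmed, map it to
 * an output rate via pll_l_rate[], and report the clock as already enabled.
 */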
static enum handoff pll_clk_handoff(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
	unsigned int pll_lval;
	struct pll_rate *l;

	/*
	 * Wait for the PLL to be initialized and then read its frequency.
	 */
	do {
		pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
		cpu_relax();
		udelay(50);
	} while (pll_lval == 0);

	/* Convert the PLL's L value to the PLL output rate. */
	for (l = pll_l_rate; l->rate != 0; l++) {
		if (l->lvalue == pll_lval) {
			clk->rate = l->rate;
			break;
		}
	}

	if (!clk->rate) {
		pr_crit("Unknown PLL L value!\n");
		BUG();
	}

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_pll_ops = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.handoff = pll_clk_handoff,
	.is_enabled = pll_clk_is_enabled,
};