/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/mfd/tps65023.h>
#include <linux/platform_device.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>

#include "acpuclock.h"
#include "avs.h"

#define SHOT_SWITCH 4
#define HOP_SWITCH 5
#define SIMPLE_SLEW 6
#define COMPLEX_SLEW 7

#define SPSS_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
#define SPSS_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)

/* Scorpion PLL registers */
#define SCPLL_CTL_ADDR (MSM_SCPLL_BASE + 0x4)
#define SCPLL_STATUS_ADDR (MSM_SCPLL_BASE + 0x18)
#define SCPLL_FSM_CTL_EXT_ADDR (MSM_SCPLL_BASE + 0x10)

#ifdef CONFIG_QSD_SVS
#define TPS65023_MAX_DCDC1 1600
#else
#define TPS65023_MAX_DCDC1 CONFIG_QSD_PMIC_DEFAULT_DCDC1
#endif

enum {
	ACPU_PLL_TCXO = -1,
	ACPU_PLL_0 = 0,
	ACPU_PLL_1,
	ACPU_PLL_2,
	ACPU_PLL_3,
	ACPU_PLL_END,
};

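/* One row per supported ACPU operating point: the CPU rate in KHz, the
 * source PLL and the mux/divider settings that generate it, the matching
 * AXI (EBI1) rate floor, the SCPLL L value used when sourced from PLL3,
 * and the required VDD in mV. The lpj field is filled in at init time. */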
struct clkctl_acpu_speed {
	unsigned int use_for_scaling;
	unsigned int acpuclk_khz;
	int pll;
	unsigned int acpuclk_src_sel;
	unsigned int acpuclk_src_div;
	unsigned int ahbclk_khz;
	unsigned int ahbclk_div;
	unsigned int axiclk_khz;
	unsigned int sc_core_src_sel_mask;
	unsigned int sc_l_value;
	int vdd;
	unsigned long lpj; /* loops_per_jiffy */
};

struct clkctl_acpu_speed acpu_freq_tbl_998[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 0, 0, 14000, 0, 0, 1000},
	{ 0, 128000, ACPU_PLL_1, 1, 5, 0, 0, 14000, 2, 0, 1000},
	{ 1, 245760, ACPU_PLL_0, 4, 0, 0, 0, 29000, 0, 0, 1000},
	/* Update AXI_S and PLL0_S macros if above row numbers change. */
	{ 1, 384000, ACPU_PLL_3, 0, 0, 0, 0, 58000, 1, 0xA, 1000},
	{ 0, 422400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xB, 1000},
	{ 0, 460800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xC, 1000},
	{ 0, 499200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xD, 1050},
	{ 0, 537600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xE, 1050},
	{ 1, 576000, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xF, 1050},
	{ 0, 614400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x10, 1075},
	{ 0, 652800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x11, 1100},
	{ 0, 691200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x12, 1125},
	{ 0, 729600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x13, 1150},
	{ 1, 768000, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x14, 1150},
	{ 0, 806400, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x15, 1175},
	{ 0, 844800, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x16, 1225},
	{ 0, 883200, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x17, 1250},
	{ 0, 921600, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x18, 1300},
	{ 0, 960000, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x19, 1300},
	{ 1, 998400, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x1A, 1300},
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};

struct clkctl_acpu_speed acpu_freq_tbl_768[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 0, 0, 14000, 0, 0, 1000},
	{ 0, 128000, ACPU_PLL_1, 1, 5, 0, 0, 14000, 2, 0, 1000},
	{ 1, 245760, ACPU_PLL_0, 4, 0, 0, 0, 29000, 0, 0, 1000},
	/* Update AXI_S and PLL0_S macros if above row numbers change. */
	{ 1, 384000, ACPU_PLL_3, 0, 0, 0, 0, 58000, 1, 0xA, 1075},
	{ 0, 422400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xB, 1100},
	{ 0, 460800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xC, 1125},
	{ 0, 499200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xD, 1150},
	{ 0, 537600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xE, 1150},
	{ 1, 576000, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xF, 1150},
	{ 0, 614400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x10, 1175},
	{ 0, 652800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x11, 1200},
	{ 0, 691200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x12, 1225},
	{ 0, 729600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x13, 1250},
	{ 1, 768000, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x14, 1250},
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};

static struct clkctl_acpu_speed *acpu_freq_tbl = acpu_freq_tbl_998;
#define AXI_S (&acpu_freq_tbl[1])
#define PLL0_S (&acpu_freq_tbl[2])

/* Use 128MHz for PC since ACPU will auto-switch to AXI (128MHz) before
 * coming back up. This allows detection of return-from-PC, since 128MHz
 * is only used for power collapse. */
#define POWER_COLLAPSE_KHZ 128000
/* Use 245MHz (not 128MHz) for SWFI to avoid unnecessary steps between
 * 128MHz<->245MHz. Jumping to high frequencies from 128MHz directly
 * is not allowed. */
#define WAIT_FOR_IRQ_KHZ 245760

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[20];

static void __devinit cpufreq_table_init(void)
{
	unsigned int i;
	unsigned int freq_cnt = 0;

	/* Construct the freq_table table from acpu_freq_tbl since the
	 * freq_table values need to match frequencies specified in
	 * acpu_freq_tbl and acpu_freq_tbl needs to be fixed up during init.
	 */
	for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0
			&& freq_cnt < ARRAY_SIZE(freq_table)-1; i++) {
		if (acpu_freq_tbl[i].use_for_scaling) {
			freq_table[freq_cnt].index = freq_cnt;
			freq_table[freq_cnt].frequency
				= acpu_freq_tbl[i].acpuclk_khz;
			freq_cnt++;
		}
	}

	/* freq_table not big enough to store all usable freqs. */
	BUG_ON(acpu_freq_tbl[i].acpuclk_khz != 0);

	freq_table[freq_cnt].index = freq_cnt;
	freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;

	pr_info("%d scaling frequencies supported.\n", freq_cnt);
}
#endif

struct clock_state {
	struct clkctl_acpu_speed *current_speed;
	struct mutex lock;
	struct clk *ebi1_clk;
	int (*acpu_set_vdd) (int mvolts);
};

static struct clock_state drv_state = { 0 };

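/* Program a new L value into the Scorpion PLL and kick off the requested
 * FSM transition (SHOT/HOP switch or slew). The L value is clamped to the
 * supported 10-33 range, and the function busy-waits until the frequency
 * switch completes. */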
static void scpll_set_freq(uint32_t lval, unsigned freq_switch)
{
	uint32_t regval;

	if (lval > 33)
		lval = 33;
	if (lval < 10)
		lval = 10;

	/* wait for any calibrations or frequency switches to finish */
	while (readl(SCPLL_STATUS_ADDR) & 0x3)
		;

	/* write the new L val and switch mode */
	regval = readl(SCPLL_FSM_CTL_EXT_ADDR);
	regval &= ~(0x3f << 3);
	regval |= (lval << 3);
	if (freq_switch == SIMPLE_SLEW)
		regval |= (0x1 << 9);

	regval &= ~(0x3 << 0);
	regval |= (freq_switch << 0);
	writel(regval, SCPLL_FSM_CTL_EXT_ADDR);

	dmb();

	/* put in normal mode */
	regval = readl(SCPLL_CTL_ADDR);
	regval |= 0x7;
	writel(regval, SCPLL_CTL_ADDR);

	dmb();

	/* wait for frequency switch to finish */
	while (readl(SCPLL_STATUS_ADDR) & 0x1)
		;

	/* status bit seems to clear early, using
	 * 100us to handle the worst case. */
	udelay(100);
}

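/* Enable or disable the Scorpion PLL. Enabling takes the PLL through
 * standby into normal (running) mode; disabling drops it from standby
 * into power-down. */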
static void scpll_apps_enable(bool state)
{
	uint32_t regval;

	if (state)
		pr_debug("Enabling PLL 3\n");
	else
		pr_debug("Disabling PLL 3\n");

	/* Wait for any frequency switches to finish. */
	while (readl(SCPLL_STATUS_ADDR) & 0x1)
		;

	/* put the pll in standby mode */
	regval = readl(SCPLL_CTL_ADDR);
	regval &= ~(0x7);
	regval |= (0x2);
	writel(regval, SCPLL_CTL_ADDR);

	dmb();

	if (state) {
		/* put the pll in normal mode */
		regval = readl(SCPLL_CTL_ADDR);
		regval |= (0x7);
		writel(regval, SCPLL_CTL_ADDR);
		udelay(200);
	} else {
		/* put the pll in power down mode */
		regval = readl(SCPLL_CTL_ADDR);
		regval &= ~(0x7);
		writel(regval, SCPLL_CTL_ADDR);
	}
	udelay(62);

	if (state)
		pr_debug("PLL 3 Enabled\n");
	else
		pr_debug("PLL 3 Disabled\n");
}

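/* One-time SCPLL bring-up: run a full calibration, then (as a workaround
 * for a hardware bug) perform a SHOT switch to 384 MHz followed by a HOP
 * switch to 768 MHz before powering the PLL back down. */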
static void scpll_init(void)
{
	uint32_t regval;
#define L_VAL_384MHZ 0xA
#define L_VAL_768MHZ 0x14

	pr_debug("Initializing PLL 3\n");

	/* power down scpll */
	writel(0x0, SCPLL_CTL_ADDR);

	dmb();

	/* set bypassnl, put into standby */
	writel(0x00400002, SCPLL_CTL_ADDR);

	/* set bypassnl, reset_n, full calibration */
	writel(0x00600004, SCPLL_CTL_ADDR);

	/* Ensure register write to initiate calibration has taken
	   effect before reading status flag */
	dmb();

	/* wait for cal_all_done */
	while (readl(SCPLL_STATUS_ADDR) & 0x2)
		;

	/* Start: Set of experimentally derived steps
	 * to work around a h/w bug. */

	/* Put the pll in normal mode */
	scpll_apps_enable(1);

	/* SHOT switch to 384 MHz */
	regval = readl(SCPLL_FSM_CTL_EXT_ADDR);
	regval &= ~(0x3f << 3);
	regval |= (L_VAL_384MHZ << 3);

	regval &= ~0x7;
	regval |= SHOT_SWITCH;
	writel(regval, SCPLL_FSM_CTL_EXT_ADDR);

	/* Trigger the freq switch by putting pll in normal mode. */
	regval = readl(SCPLL_CTL_ADDR);
	regval |= (0x7);
	writel(regval, SCPLL_CTL_ADDR);

	/* Wait for frequency switch to finish */
	while (readl(SCPLL_STATUS_ADDR) & 0x1)
		;

	/* Status bit seems to clear early, using
	 * 800 microseconds for the worst case. */
	udelay(800);

	/* HOP switch to 768 MHz. */
	regval = readl(SCPLL_FSM_CTL_EXT_ADDR);
	regval &= ~(0x3f << 3);
	regval |= (L_VAL_768MHZ << 3);

	regval &= ~0x7;
	regval |= HOP_SWITCH;
	writel(regval, SCPLL_FSM_CTL_EXT_ADDR);

	/* Trigger the freq switch by putting pll in normal mode. */
	regval = readl(SCPLL_CTL_ADDR);
	regval |= (0x7);
	writel(regval, SCPLL_CTL_ADDR);

	/* Wait for frequency switch to finish */
	while (readl(SCPLL_STATUS_ADDR) & 0x1)
		;

	/* Status bit seems to clear early, using
	 * 100 microseconds for the worst case. */
	udelay(100);

	/* End: Work around for h/w bug */

	/* Power down scpll */
	scpll_apps_enable(0);
}

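/* Route the ACPU to the clock source described by 's'. For PLL3 rates the
 * SCPLL is reprogrammed directly; otherwise the idle bank of the glitch-free
 * source mux is programmed and then switched to. Finally the core source
 * select field in SPSS_CLK_SEL is updated. */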
static void config_pll(struct clkctl_acpu_speed *s)
{
	uint32_t regval;

	if (s->pll == ACPU_PLL_3)
		scpll_set_freq(s->sc_l_value, HOP_SWITCH);
	/* Configure the PLL divider mux if we plan to use it. */
	else if (s->sc_core_src_sel_mask == 0) {
		/* get the current clock source selection */
		regval = readl(SPSS_CLK_SEL_ADDR) & 0x1;

		/* configure the other clock source, then switch to it,
		 * using the glitch free mux */
		switch (regval) {
		case 0x0:
			regval = readl(SPSS_CLK_CNTL_ADDR);
			regval &= ~(0x7 << 4 | 0xf);
			regval |= (s->acpuclk_src_sel << 4);
			regval |= (s->acpuclk_src_div << 0);
			writel(regval, SPSS_CLK_CNTL_ADDR);

			regval = readl(SPSS_CLK_SEL_ADDR);
			regval |= 0x1;
			writel(regval, SPSS_CLK_SEL_ADDR);
			break;

		case 0x1:
			regval = readl(SPSS_CLK_CNTL_ADDR);
			regval &= ~(0x7 << 12 | 0xf << 8);
			regval |= (s->acpuclk_src_sel << 12);
			regval |= (s->acpuclk_src_div << 8);
			writel(regval, SPSS_CLK_CNTL_ADDR);

			regval = readl(SPSS_CLK_SEL_ADDR);
			regval &= ~0x1;
			writel(regval, SPSS_CLK_SEL_ADDR);
			break;
		}
		dmb();
	}

	regval = readl(SPSS_CLK_SEL_ADDR);
	regval &= ~(0x3 << 1);
	regval |= (s->sc_core_src_sel_mask << 1);
	writel(regval, SPSS_CLK_SEL_ADDR);
}

static int acpuclk_set_vdd_level(int vdd)
{
	if (drv_state.acpu_set_vdd) {
		pr_debug("Switching VDD to %d mV\n", vdd);
		return drv_state.acpu_set_vdd(vdd);
	} else {
		/* Assume that the PMIC supports scaling the processor
		 * to its maximum frequency at its default voltage.
		 */
		return 0;
	}
}

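/* Switch the ACPU to 'rate' (in KHz). For cpufreq-initiated changes, AVS is
 * notified and VDD raised before ramping up, and both are relaxed after
 * ramping down. Transitions into or out of PLL3 enable or disable the SCPLL,
 * and PLL3-to-PLL3 changes bounce through PLL0 while SCPLL is reprogrammed. */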
static int acpuclk_8x50_set_rate(int cpu, unsigned long rate,
				 enum setrate_reason reason)
{
	struct clkctl_acpu_speed *tgt_s, *strt_s;
	int res, rc = 0;
	int freq_index = 0;

	if (reason == SETRATE_CPUFREQ)
		mutex_lock(&drv_state.lock);

	strt_s = drv_state.current_speed;

	if (rate == strt_s->acpuclk_khz)
		goto out;

	for (tgt_s = acpu_freq_tbl; tgt_s->acpuclk_khz != 0; tgt_s++) {
		if (tgt_s->acpuclk_khz == rate)
			break;
		freq_index++;
	}

	if (tgt_s->acpuclk_khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	if (reason == SETRATE_CPUFREQ) {
#ifdef CONFIG_MSM_CPU_AVS
		/* Notify avs before changing frequency */
		rc = avs_adjust_freq(freq_index, 1);
		if (rc) {
			pr_err("Unable to increase ACPU vdd (%d)\n", rc);
			goto out;
		}
#endif
		/* Increase VDD if needed. */
		if (tgt_s->vdd > strt_s->vdd) {
			rc = acpuclk_set_vdd_level(tgt_s->vdd);
			if (rc) {
				pr_err("Unable to increase ACPU vdd (%d)\n",
					rc);
				goto out;
			}
		}
	} else if (reason == SETRATE_PC
		&& rate != POWER_COLLAPSE_KHZ) {
		/* Returning from PC. ACPU is running on AXI source.
		 * Step up to PLL0 before ramping up higher. */
		config_pll(PLL0_S);
	}

	pr_debug("Switching from ACPU rate %u KHz -> %u KHz\n",
		strt_s->acpuclk_khz, tgt_s->acpuclk_khz);

	if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) {
		config_pll(tgt_s);
	} else if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll == ACPU_PLL_3) {
		scpll_apps_enable(1);
		config_pll(tgt_s);
	} else if (strt_s->pll == ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) {
		config_pll(tgt_s);
		scpll_apps_enable(0);
	} else {
		/* Temporarily switch to PLL0 while reconfiguring PLL3. */
		config_pll(PLL0_S);
		config_pll(tgt_s);
	}

	/* Update the driver state with the new clock freq */
	drv_state.current_speed = tgt_s;

	/* Re-adjust lpj for the new clock speed. */
	loops_per_jiffy = tgt_s->lpj;

	/* Nothing else to do for SWFI. */
	if (reason == SETRATE_SWFI)
		goto out;

	if (strt_s->axiclk_khz != tgt_s->axiclk_khz) {
		res = clk_set_rate(drv_state.ebi1_clk,
				tgt_s->axiclk_khz * 1000);
		if (res < 0)
			pr_warning("Setting AXI min rate failed (%d)\n", res);
	}

	/* Nothing else to do for power collapse */
	if (reason == SETRATE_PC)
		goto out;

#ifdef CONFIG_MSM_CPU_AVS
	/* notify avs after changing frequency */
	res = avs_adjust_freq(freq_index, 0);
	if (res)
		pr_warning("Unable to drop ACPU vdd (%d)\n", res);
#endif

	/* Drop VDD level if we can. */
	if (tgt_s->vdd < strt_s->vdd) {
		res = acpuclk_set_vdd_level(tgt_s->vdd);
		if (res)
			pr_warning("Unable to drop ACPU vdd (%d)\n", res);
	}

	pr_debug("ACPU speed change complete\n");
out:
	if (reason == SETRATE_CPUFREQ)
		mutex_unlock(&drv_state.lock);
	return rc;
}

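/* Work out which source the boot loader left the Scorpion running on and
 * match it against the frequency table. SCPLL is initialized here only if
 * the CPU is not already running from it. */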
static void __devinit acpuclk_hw_init(void)
{
	struct clkctl_acpu_speed *speed;
	uint32_t div, sel, regval;
	int res;

	/* Determine the source of the Scorpion clock. */
	regval = readl(SPSS_CLK_SEL_ADDR);
	switch ((regval & 0x6) >> 1) {
	case 0: /* raw source clock */
	case 3: /* low jitter PLL1 (768Mhz) */
		if (regval & 0x1) {
			sel = ((readl(SPSS_CLK_CNTL_ADDR) >> 4) & 0x7);
			div = ((readl(SPSS_CLK_CNTL_ADDR) >> 0) & 0xf);
		} else {
			sel = ((readl(SPSS_CLK_CNTL_ADDR) >> 12) & 0x7);
			div = ((readl(SPSS_CLK_CNTL_ADDR) >> 8) & 0xf);
		}

		/* Find the matching clock rate. */
		for (speed = acpu_freq_tbl; speed->acpuclk_khz != 0; speed++) {
			if (speed->acpuclk_src_sel == sel &&
			    speed->acpuclk_src_div == div)
				break;
		}
		break;

	case 1: /* unbuffered scorpion pll (384Mhz to 998.4Mhz) */
		sel = ((readl(SCPLL_FSM_CTL_EXT_ADDR) >> 3) & 0x3f);

		/* Find the matching clock rate. */
		for (speed = acpu_freq_tbl; speed->acpuclk_khz != 0; speed++) {
			if (speed->sc_l_value == sel &&
			    speed->sc_core_src_sel_mask == 1)
				break;
		}
		break;

	case 2: /* AXI bus clock (128Mhz) */
		speed = AXI_S;
		break;
	default:
		BUG();
	}

	/* Initialize scpll only if it wasn't already initialized by the boot
	 * loader. If the CPU is already running on scpll, then the scpll was
	 * initialized by the boot loader. */
	if (speed->pll != ACPU_PLL_3)
		scpll_init();

	if (speed->acpuclk_khz == 0) {
		pr_err("Error - ACPU clock reports invalid speed\n");
		return;
	}

	drv_state.current_speed = speed;
	res = clk_set_rate(drv_state.ebi1_clk, speed->axiclk_khz * 1000);
	if (res < 0)
		pr_warning("Setting AXI min rate failed (%d)\n", res);
	res = clk_enable(drv_state.ebi1_clk);
	if (res < 0)
		pr_warning("Enabling AXI clock failed (%d)\n", res);

	pr_info("ACPU running at %d KHz\n", speed->acpuclk_khz);
}

static unsigned long acpuclk_8x50_get_rate(int cpu)
{
	return drv_state.current_speed->acpuclk_khz;
}

/* Spare register populated with efuse data on max ACPU freq. */
#define CT_CSR_PHYS 0xA8700000
#define TCSR_SPARE2_ADDR (ct_csr_base + 0x60)

#define PLL0_M_VAL_ADDR (MSM_CLK_CTL_BASE + 0x308)

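/* Trim the frequency table at init time: select the 768 MHz table or cap
 * the maximum rate based on the efuse value in TCSR_SPARE2, correct the
 * PLL0 row if PLL0 runs at 235 MHz, and drop rows whose voltage exceeds
 * what the TPS65023 can supply. */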
static void __devinit acpu_freq_tbl_fixup(void)
{
	void __iomem *ct_csr_base;
	uint32_t tcsr_spare2, pll0_m_val;
	unsigned int max_acpu_khz;
	unsigned int i;

	ct_csr_base = ioremap(CT_CSR_PHYS, PAGE_SIZE);
	BUG_ON(ct_csr_base == NULL);

	tcsr_spare2 = readl(TCSR_SPARE2_ADDR);

	/* Check if the register is supported and meaningful. */
	if ((tcsr_spare2 & 0xF000) != 0xA000) {
		pr_info("Efuse data on Max ACPU freq not present.\n");
		goto skip_efuse_fixup;
	}

	switch (tcsr_spare2 & 0xF0) {
	case 0x70:
		acpu_freq_tbl = acpu_freq_tbl_768;
		max_acpu_khz = 768000;
		break;
	case 0x30:
	case 0x00:
		max_acpu_khz = 998400;
		break;
	case 0x10:
		max_acpu_khz = 1267200;
		break;
	default:
		pr_warning("Invalid efuse data (%x) on Max ACPU freq!\n",
			tcsr_spare2);
		goto skip_efuse_fixup;
	}

	pr_info("Max ACPU freq from efuse data is %d KHz\n", max_acpu_khz);

	for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0; i++) {
		if (acpu_freq_tbl[i].acpuclk_khz > max_acpu_khz) {
			acpu_freq_tbl[i].acpuclk_khz = 0;
			break;
		}
	}

skip_efuse_fixup:
	iounmap(ct_csr_base);

	/* pll0_m_val will be 36 when PLL0 is run at 235MHz
	 * instead of the usual 245MHz. */
	pll0_m_val = readl(PLL0_M_VAL_ADDR) & 0x7FFFF;
	if (pll0_m_val == 36)
		PLL0_S->acpuclk_khz = 235930;

	for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0; i++) {
		if (acpu_freq_tbl[i].vdd > TPS65023_MAX_DCDC1) {
			acpu_freq_tbl[i].acpuclk_khz = 0;
			break;
		}
	}
}

/* Initialize the lpj field in the acpu_freq_tbl. */
static void __devinit lpj_init(void)
{
	int i;
	const struct clkctl_acpu_speed *base_clk = drv_state.current_speed;
	for (i = 0; acpu_freq_tbl[i].acpuclk_khz; i++) {
		acpu_freq_tbl[i].lpj = cpufreq_scale(loops_per_jiffy,
						base_clk->acpuclk_khz,
						acpu_freq_tbl[i].acpuclk_khz);
	}
}

#ifdef CONFIG_MSM_CPU_AVS
static int __devinit acpu_avs_init(int (*set_vdd) (int), int khz)
{
	int i;
	int freq_count = 0;
	int freq_index = -1;

	for (i = 0; acpu_freq_tbl[i].acpuclk_khz; i++) {
		freq_count++;
		if (acpu_freq_tbl[i].acpuclk_khz == khz)
			freq_index = i;
	}

	return avs_init(set_vdd, freq_count, freq_index);
}
#endif

static int qsd8x50_tps65023_set_dcdc1(int mVolts)
{
	int rc = 0;
#ifdef CONFIG_QSD_SVS
	rc = tps65023_set_dcdc1_level(mVolts);
	/*
	 * By default the TPS65023 will be initialized to 1.225V.
	 * So we can safely switch to any frequency within this
	 * voltage even if the device is not probed/ready.
	 */
	if (rc == -ENODEV && mVolts <= CONFIG_QSD_PMIC_DEFAULT_DCDC1)
		rc = 0;
#else
	/*
	 * Disallow frequencies not supported in the default PMIC
	 * output voltage.
	 */
	if (mVolts > CONFIG_QSD_PMIC_DEFAULT_DCDC1)
		rc = -EFAULT;
#endif
	return rc;
}

static struct acpuclk_data acpuclk_8x50_data = {
	.set_rate = acpuclk_8x50_set_rate,
	.get_rate = acpuclk_8x50_get_rate,
	.power_collapse_khz = POWER_COLLAPSE_KHZ,
	.wait_for_irq_khz = WAIT_FOR_IRQ_KHZ,
	.switch_time_us = 20,
};

static int __devinit acpuclk_8x50_probe(struct platform_device *pdev)
{
	mutex_init(&drv_state.lock);
	drv_state.acpu_set_vdd = qsd8x50_tps65023_set_dcdc1;

	drv_state.ebi1_clk = clk_get(NULL, "ebi1_acpu_clk");
	BUG_ON(IS_ERR(drv_state.ebi1_clk));

	acpu_freq_tbl_fixup();
	acpuclk_hw_init();
	lpj_init();
	/* Set a lower bound for ACPU rate for boot. This limits the
	 * maximum frequency hop caused by the first CPUFREQ switch. */
	if (drv_state.current_speed->acpuclk_khz < PLL0_S->acpuclk_khz)
		acpuclk_set_rate(0, PLL0_S->acpuclk_khz, SETRATE_CPUFREQ);

	acpuclk_register(&acpuclk_8x50_data);

#ifdef CONFIG_CPU_FREQ_MSM
	cpufreq_table_init();
	cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
#endif
#ifdef CONFIG_MSM_CPU_AVS
	if (!acpu_avs_init(drv_state.acpu_set_vdd,
		drv_state.current_speed->acpuclk_khz)) {
		/* avs init successful. avs will handle voltage changes */
		drv_state.acpu_set_vdd = NULL;
	}
#endif
	return 0;
}

static struct platform_driver acpuclk_8x50_driver = {
	.probe = acpuclk_8x50_probe,
	.driver = {
		.name = "acpuclk-8x50",
		.owner = THIS_MODULE,
	},
};

static int __init acpuclk_8x50_init(void)
{
	return platform_driver_register(&acpuclk_8x50_driver);
}
postcore_initcall(acpuclk_8x50_init);