/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/msm_bus.h>

#include "acpuclock.h"
#include "acpuclock-krait.h"

/*
 * MUX source selects. Each Krait core and the L2 have a two-stage clock
 * mux: the primary MUX selects between the secondary MUX output, the HFPLL
 * and HFPLL/2, while the secondary MUX selects between QSB, the L2 PLL and
 * an always-on AUX source. set_speed() parks a core on the secondary MUX
 * whenever its HFPLL has to be reprogrammed.
 */
#define PRI_SRC_SEL_SEC_SRC	0
#define PRI_SRC_SEL_HFPLL	1
#define PRI_SRC_SEL_HFPLL_DIV2	2
#define SEC_SRC_SEL_QSB		0
#define SEC_SRC_SEL_L2PLL	1
#define SEC_SRC_SEL_AUX		2

/* PTE EFUSE register offset. */
#define PTE_EFUSE		0xC0

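/*
 * driver_lock serializes rate changes requested by the cpufreq and hotplug
 * paths, which may sleep. l2_lock protects the per-CPU L2 votes and the L2
 * speed, which are also updated from atomic contexts (power collapse and
 * SWFI), where driver_lock cannot be taken.
 */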
static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);

static struct drv_data {
	const struct acpu_level *acpu_freq_tbl;
	const struct l2_level *l2_freq_tbl;
	struct scalable *scalable;
	u32 bus_perf_client;
	struct device *dev;
} drv;

static unsigned long acpuclk_krait_get_rate(int cpu)
{
	return drv.scalable[cpu].cur_speed->khz;
}

/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

/* Select a source on the secondary MUX. */
static void set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
	int rc;

	if (!skip_regulators) {
		/* Enable regulators required by the HFPLL. */
		if (sc->vreg[VREG_HFPLL_A].rpm_vreg_id) {
			rc = rpm_vreg_set_voltage(
				sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
				sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
				sc->vreg[VREG_HFPLL_A].cur_vdd,
				sc->vreg[VREG_HFPLL_A].max_vdd, 0);
			if (rc)
				dev_err(drv.dev,
					"%s regulator enable failed (%d)\n",
					sc->vreg[VREG_HFPLL_A].name, rc);
		}
		if (sc->vreg[VREG_HFPLL_B].rpm_vreg_id) {
			rc = rpm_vreg_set_voltage(
				sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
				sc->vreg[VREG_HFPLL_B].rpm_vreg_voter,
				sc->vreg[VREG_HFPLL_B].cur_vdd,
				sc->vreg[VREG_HFPLL_B].max_vdd, 0);
			if (rc)
				dev_err(drv.dev,
					"%s regulator enable failed (%d)\n",
					sc->vreg[VREG_HFPLL_B].name, rc);
		}
	}

	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + sc->hfpll_data->mode_offset);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + sc->hfpll_data->mode_offset);

	/* Wait for PLL to lock. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + sc->hfpll_data->mode_offset);
}

/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	int rc;

	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		if (sc->vreg[VREG_HFPLL_B].rpm_vreg_id) {
			rc = rpm_vreg_set_voltage(
				sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
				sc->vreg[VREG_HFPLL_B].rpm_vreg_voter,
				0, 0, 0);
			if (rc)
				dev_err(drv.dev,
					"%s regulator vote removal failed (%d)\n",
					sc->vreg[VREG_HFPLL_B].name, rc);
		}
		if (sc->vreg[VREG_HFPLL_A].rpm_vreg_id) {
			rc = rpm_vreg_set_voltage(
				sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
				sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
				0, 0, 0);
			if (rc)
				dev_err(drv.dev,
					"%s regulator vote removal failed (%d)\n",
					sc->vreg[VREG_HFPLL_A].name, rc);
		}
	}
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	writel_relaxed(tgt_s->pll_l_val,
		sc->hfpll_base + sc->hfpll_data->l_offset);
}

/* Return the L2 speed that should be applied. */
static const struct l2_level *compute_l2_level(struct scalable *sc,
					       const struct l2_level *vote_l)
{
	const struct l2_level *new_l;
	int cpu;

	/*
	 * Find the max L2 speed vote. Each vote is a pointer into
	 * l2_freq_tbl, so comparing the pointers with max() yields the
	 * fastest vote, assuming the table entries are ordered by
	 * ascending rate.
	 */
	sc->l2_vote = vote_l;
	new_l = drv.l2_freq_tbl;
	for_each_present_cpu(cpu)
		new_l = max(new_l, drv.scalable[cpu].l2_vote);

	return new_l;
}

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	/* Update bandwidth if request has changed. This may sleep. */
	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
	if (ret)
		dev_err(drv.dev, "bandwidth request failed (%d)\n", ret);
}

/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s)
{
	const struct core_speed *strt_s = sc->cur_speed;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Re-program HFPLL. */
		hfpll_disable(sc, 1);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, 1);

		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		set_sec_clk_src(sc, tgt_s->sec_src_sel);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, 0);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, 0);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else {
		set_sec_clk_src(sc, tgt_s->sec_src_sel);
	}

	sc->cur_speed = tgt_s;
}

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc = 0;

	/*
	 * Increase vdd_mem active-set before vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
		rc = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
				sc->vreg[VREG_MEM].max_vdd, 0);
		if (rc) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
	}

	/* Increase vdd_dig active-set vote. */
	if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
		rc = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
				sc->vreg[VREG_DIG].max_vdd, 0);
		if (rc) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
	}

	/*
	 * Update per-CPU core voltage. Don't do this for the hotplug path for
	 * which it should already be correct. Attempting to set it is bad
	 * because we don't know what CPU we are running on at this point, but
	 * the CPU regulator API requires we call it from the affected CPU.
	 */
	if (vdd_core > sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
					   sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	}

	return rc;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (vdd_core < sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
					    sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
				sc->vreg[VREG_DIG].max_vdd, 0);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
				sc->vreg[VREG_MEM].max_vdd, 0);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
	}
}

static int calculate_vdd_mem(const struct acpu_level *tgt)
{
	return tgt->l2_level->vdd_mem;
}

static int calculate_vdd_dig(const struct acpu_level *tgt)
{
	int pll_vdd_dig;
	const int *hfpll_vdd = drv.scalable[L2].hfpll_data->vdd;
	const u32 low_vdd_l_max = drv.scalable[L2].hfpll_data->low_vdd_l_max;

	if (tgt->l2_level->speed.src != HFPLL)
		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NONE];
	else if (tgt->l2_level->speed.pll_l_val > low_vdd_l_max)
		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NOM];
	else
		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_LOW];

	return max(tgt->l2_level->vdd_dig, pll_vdd_dig);
}

static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core;
}

/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
				  enum setrate_reason reason)
{
	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
	const struct l2_level *tgt_l2_l;
	const struct acpu_level *tgt;
	int vdd_mem, vdd_dig, vdd_core;
	unsigned long flags;
	int rc = 0;

	if (cpu >= num_possible_cpus())
		return -EINVAL;

	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_lock(&driver_lock);

	strt_acpu_s = drv.scalable[cpu].cur_speed;

	/* Return early if rate didn't change. */
	if (rate == strt_acpu_s->khz)
		goto out;

	/* Find target frequency. */
	for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
		if (tgt->speed.khz == rate) {
			tgt_acpu_s = &tgt->speed;
			break;
		}
	}
	if (tgt->speed.khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Calculate voltage requirements for the current CPU. */
	vdd_mem = calculate_vdd_mem(tgt);
	vdd_dig = calculate_vdd_dig(tgt);
	vdd_core = calculate_vdd_core(tgt);

	/* Increase VDD levels if needed. */
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
		rc = increase_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
		if (rc)
			goto out;
	}

	pr_debug("Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
		 cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

	/* Set the new CPU speed. */
	set_speed(&drv.scalable[cpu], tgt_acpu_s);

	/*
	 * Update the L2 vote and apply the rate change. A spinlock is
	 * necessary to ensure L2 rate is calculated and set atomically
	 * with the CPU frequency, even if acpuclk_krait_set_rate() is
	 * called from an atomic context and the driver_lock mutex is not
	 * acquired.
	 */
	spin_lock_irqsave(&l2_lock, flags);
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	set_speed(&drv.scalable[L2], &tgt_l2_l->speed);
	spin_unlock_irqrestore(&l2_lock, flags);

	/* Nothing else to do for power collapse or SWFI. */
	if (reason == SETRATE_PC || reason == SETRATE_SWFI)
		goto out;

	/* Update bus bandwidth request. */
	set_bus_bw(tgt_l2_l->bw_level);

	/* Drop VDD levels if we can. */
	decrease_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);

	pr_debug("ACPU%d speed change complete\n", cpu);

out:
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_unlock(&driver_lock);
	return rc;
}

/* Initialize a HFPLL at a given rate and enable it. */
static void __init hfpll_init(struct scalable *sc,
			      const struct core_speed *tgt_s)
{
	pr_debug("Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, 1);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(sc->hfpll_data->config_val,
		sc->hfpll_base + sc->hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + sc->hfpll_data->n_offset);

	/* Set an initial rate and enable the PLL. */
	hfpll_set_rate(sc, tgt_s);
	hfpll_enable(sc, 0);
}

/* Voltage regulator initialization. */
static void __init regulator_init(const struct acpu_level *lvl)
{
	int cpu, ret;
	struct scalable *sc;
	int vdd_mem, vdd_dig, vdd_core;

	vdd_mem = calculate_vdd_mem(lvl);
	vdd_dig = calculate_vdd_dig(lvl);

	for_each_possible_cpu(cpu) {
		sc = &drv.scalable[cpu];

		/* Set initial vdd_mem vote. */
		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
				sc->vreg[VREG_MEM].max_vdd, 0);
		if (ret) {
			dev_err(drv.dev, "%s initialization failed (%d)\n",
				sc->vreg[VREG_MEM].name, ret);
			BUG();
		}
		sc->vreg[VREG_MEM].cur_vdd = vdd_mem;

		/* Set initial vdd_dig vote. */
		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
				sc->vreg[VREG_DIG].max_vdd, 0);
		if (ret) {
			dev_err(drv.dev, "%s initialization failed (%d)\n",
				sc->vreg[VREG_DIG].name, ret);
			BUG();
		}
		sc->vreg[VREG_DIG].cur_vdd = vdd_dig;

		/* Setup Krait CPU regulators and initial core voltage. */
		sc->vreg[VREG_CORE].reg = regulator_get(NULL,
					  sc->vreg[VREG_CORE].name);
		if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
			dev_err(drv.dev, "regulator_get(%s) failed (%ld)\n",
				sc->vreg[VREG_CORE].name,
				PTR_ERR(sc->vreg[VREG_CORE].reg));
			BUG();
		}
		vdd_core = calculate_vdd_core(lvl);
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
					    sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			BUG();
		}
		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 sc->vreg[VREG_CORE].peak_ua);
		if (ret < 0) {
			dev_err(drv.dev,
				"regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			BUG();
		}
		ret = regulator_enable(sc->vreg[VREG_CORE].reg);
		if (ret) {
			dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			BUG();
		}
	}
}

/* Set initial rate for a given core. */
static void __init init_clock_sources(struct scalable *sc,
				      const struct core_speed *tgt_s)
{
	u32 regval;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_addr)
		writel_relaxed(sc->aux_clk_sel, sc->aux_clk_sel_addr);

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Switch to the target clock source. */
	set_sec_clk_src(sc, tgt_s->sec_src_sel);
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;
}

static void __init per_cpu_init(int cpu, const struct acpu_level *max_level)
{
	drv.scalable[cpu].hfpll_base =
		ioremap(drv.scalable[cpu].hfpll_phys_base, SZ_32);
	BUG_ON(!drv.scalable[cpu].hfpll_base);

	init_clock_sources(&drv.scalable[cpu], &max_level->speed);
	drv.scalable[cpu].l2_vote = max_level->l2_level;
}

/* Register with bus driver. */
static void __init bus_init(struct msm_bus_scale_pdata *bus_scale_data,
			    unsigned int init_bw)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(bus_scale_data);
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
						  init_bw);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}

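/*
 * Export the supported rates to cpufreq. Only rows of acpu_freq_tbl that
 * are marked use_for_scaling end up in the per-CPU cpufreq tables built
 * below.
 */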
#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

static void __init cpufreq_table_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;
		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table); i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
			 cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
#else
static void __init cpufreq_table_init(void) {}
#endif

#define HOT_UNPLUG_KHZ STBY_KHZ
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	int rc, cpu = (int)hcpu;
	struct scalable *sc = &drv.scalable[cpu];

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		break;
	case CPU_UP_PREPARE:
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						sc->vreg[VREG_CORE].peak_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};

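/*
 * Pick the PVS-specific CPU frequency table and return the fastest entry
 * that is marked usable for scaling. The PVS bin is read from bits [12:10]
 * of the PTE_EFUSE word; when that field reads 0x7, the alternate field in
 * bits [15:13] is used instead.
 */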
static const struct acpu_level __init *select_freq_plan(
		const struct acpu_level *const *pvs_tbl, u32 qfprom_phys)
{
	const struct acpu_level *l, *max_acpu_level = NULL;
	void __iomem *qfprom_base;
	u32 pte_efuse, pvs, tbl_idx;
	char *pvs_names[] = { "Slow", "Nominal", "Fast", "Unknown" };

	qfprom_base = ioremap(qfprom_phys, SZ_256);
	/* Select frequency tables. */
	if (qfprom_base) {
		pte_efuse = readl_relaxed(qfprom_base + PTE_EFUSE);
		pvs = (pte_efuse >> 10) & 0x7;
		iounmap(qfprom_base);
		if (pvs == 0x7)
			pvs = (pte_efuse >> 13) & 0x7;

		switch (pvs) {
		case 0x0:
		case 0x7:
			tbl_idx = PVS_SLOW;
			break;
		case 0x1:
			tbl_idx = PVS_NOMINAL;
			break;
		case 0x3:
			tbl_idx = PVS_FAST;
			break;
		default:
			tbl_idx = PVS_UNKNOWN;
			break;
		}
	} else {
		tbl_idx = PVS_UNKNOWN;
		dev_err(drv.dev, "Unable to map QFPROM base\n");
	}
	dev_info(drv.dev, "ACPU PVS: %s\n", pvs_names[tbl_idx]);
	if (tbl_idx == PVS_UNKNOWN) {
		tbl_idx = PVS_SLOW;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %s\n",
			 pvs_names[tbl_idx]);
	}
	drv.acpu_freq_tbl = pvs_tbl[tbl_idx];

	/* Find the max supported scaling frequency. */
	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (l->use_for_scaling)
			max_acpu_level = l;
	BUG_ON(!max_acpu_level);
	dev_info(drv.dev, "Max ACPU freq: %lu KHz\n",
		 max_acpu_level->speed.khz);

	return max_acpu_level;
}

static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
	.power_collapse_khz = STBY_KHZ,
	.wait_for_irq_khz = STBY_KHZ,
};

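/*
 * A sketch of how an SoC-specific file is expected to call this init
 * routine. The structure and symbol names below are illustrative only
 * (the field names match how acpuclk_krait_init() uses params; the table
 * names and the QFPROM base are per-SoC):
 *
 *	static struct acpuclk_krait_params acpuclk_foo_params = {
 *		.scalable = scalable,
 *		.pvs_acpu_freq_tbl = pvs_acpu_freq_tbl,
 *		.l2_freq_tbl = l2_freq_tbl,
 *		.bus_scale_data = &bus_scale_data,
 *		.qfprom_phys_base = <SoC QFPROM base>,
 *	};
 *
 *	acpuclk_krait_init(dev, &acpuclk_foo_params);
 */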
int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	const struct acpu_level *max_acpu_level;
	int cpu;

	drv.scalable = params->scalable;
	drv.l2_freq_tbl = params->l2_freq_tbl;
	drv.dev = dev;

	drv.scalable[L2].hfpll_base =
		ioremap(drv.scalable[L2].hfpll_phys_base, SZ_32);
	BUG_ON(!drv.scalable[L2].hfpll_base);

	max_acpu_level = select_freq_plan(params->pvs_acpu_freq_tbl,
					  params->qfprom_phys_base);
	regulator_init(max_acpu_level);
	bus_init(params->bus_scale_data, max_acpu_level->l2_level->bw_level);
	init_clock_sources(&drv.scalable[L2], &max_acpu_level->l2_level->speed);
	for_each_online_cpu(cpu)
		per_cpu_init(cpu, max_acpu_level);

	cpufreq_table_init();

	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	return 0;
}