/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>

#include "acpuclock.h"
#include "acpuclock-krait.h"

/* MUX source selects. */
#define PRI_SRC_SEL_SEC_SRC	0
#define PRI_SRC_SEL_HFPLL	1
#define PRI_SRC_SEL_HFPLL_DIV2	2
#define SEC_SRC_SEL_QSB		0
#define SEC_SRC_SEL_L2PLL	1
#define SEC_SRC_SEL_AUX		2

/* PTE EFUSE register offset. */
#define PTE_EFUSE		0xC0

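/*
 * driver_lock serializes rate changes requested from sleepable contexts
 * (cpufreq and CPU hotplug), while l2_lock protects the L2 vote and rate so
 * they can also be updated consistently from atomic contexts.
 */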
static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);

static struct drv_data {
	const struct acpu_level *acpu_freq_tbl;
	const struct l2_level *l2_freq_tbl;
	struct scalable *scalable;
	u32 bus_perf_client;
	struct device *dev;
} drv;

static unsigned long acpuclk_krait_get_rate(int cpu)
{
	return drv.scalable[cpu].cur_speed->khz;
}

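/*
 * Each core's clock MUXes are controlled through an indirectly addressed L2
 * CPMR register: bits [1:0] select the primary source, bits [3:2] select the
 * secondary source, and bits [7:6] hold the HFPLL div-2 divider programmed
 * in init_clock_sources().
 */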
/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

/* Select a source on the secondary MUX. */
static void set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

static void enable_rpm_vreg(struct vreg *vreg)
{
	int rc;

	if (vreg->rpm_reg) {
		rc = rpm_regulator_enable(vreg->rpm_reg);
		if (rc) {
			dev_err(drv.dev, "%s regulator enable failed (%d)\n",
				vreg->name, rc);
			BUG();
		}
	}
}

static void disable_rpm_vreg(struct vreg *vreg)
{
	int rc;

	if (vreg->rpm_reg) {
		rc = rpm_regulator_disable(vreg->rpm_reg);
		if (rc)
			dev_err(drv.dev, "%s regulator disable failed (%d)\n",
				vreg->name, rc);
	}
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
	if (!skip_regulators) {
		/* Enable regulators required by the HFPLL. */
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
	}

	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + sc->hfpll_data->mode_offset);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + sc->hfpll_data->mode_offset);

	/* Wait for PLL to lock. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + sc->hfpll_data->mode_offset);
}

/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
	}
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	writel_relaxed(tgt_s->pll_l_val,
		       sc->hfpll_base + sc->hfpll_data->l_offset);
}

/* Return the L2 speed that should be applied. */
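/*
 * Votes are pointers into l2_freq_tbl, so taking the max() of the per-CPU
 * vote pointers picks the highest table entry. This assumes l2_freq_tbl is
 * sorted by ascending frequency.
 */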
static const struct l2_level *compute_l2_level(struct scalable *sc,
					       const struct l2_level *vote_l)
{
	const struct l2_level *new_l;
	int cpu;

	/* Find max L2 speed vote. */
	sc->l2_vote = vote_l;
	new_l = drv.l2_freq_tbl;
	for_each_present_cpu(cpu)
		new_l = max(new_l, drv.scalable[cpu].l2_vote);

	return new_l;
}

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	/* Update bandwidth if request has changed. This may sleep. */
	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
	if (ret)
		dev_err(drv.dev, "bandwidth request failed (%d)\n", ret);
}

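/*
 * The HFPLL must be disabled before its rate is reprogrammed (see
 * hfpll_set_rate), so an HFPLL-to-HFPLL change first parks the core on the
 * always-on secondary (AUX) source, retunes the PLL, then switches back.
 * Other transitions only need the MUX switches around the PLL enable or
 * disable.
 */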
/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s)
{
	const struct core_speed *strt_s = sc->cur_speed;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Re-program HFPLL. */
		hfpll_disable(sc, true);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, true);

		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		set_sec_clk_src(sc, tgt_s->sec_src_sel);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, false);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, false);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else {
		set_sec_clk_src(sc, tgt_s->sec_src_sel);
	}

	sc->cur_speed = tgt_s;
}

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc = 0;

	/*
	 * Increase vdd_mem active-set before vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
	}

	/* Increase vdd_dig active-set vote. */
	if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
	}

	/*
	 * Update per-CPU core voltage. Don't do this for the hotplug path for
	 * which it should already be correct. Attempting to set it is bad
	 * because we don't know what CPU we are running on at this point, but
	 * the CPU regulator API requires we call it from the affected CPU.
	 */
	if (vdd_core > sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
					   sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	}

	return rc;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (vdd_core < sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
					    sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
	}
}

static int calculate_vdd_mem(const struct acpu_level *tgt)
{
	return tgt->l2_level->vdd_mem;
}

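/*
 * vdd_dig must satisfy both the L2 frequency's requirement and the HFPLL's
 * own requirement, which depends on whether the L2 runs from the HFPLL and
 * on how high its L-value is programmed.
 */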
static int calculate_vdd_dig(const struct acpu_level *tgt)
{
	int pll_vdd_dig;
	const int *hfpll_vdd = drv.scalable[L2].hfpll_data->vdd;
	const u32 low_vdd_l_max = drv.scalable[L2].hfpll_data->low_vdd_l_max;

	if (tgt->l2_level->speed.src != HFPLL)
		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NONE];
	else if (tgt->l2_level->speed.pll_l_val > low_vdd_l_max)
		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NOM];
	else
		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_LOW];

	return max(tgt->l2_level->vdd_dig, pll_vdd_dig);
}

static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core;
}

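/*
 * Only the SETRATE_CPUFREQ and SETRATE_HOTPLUG paths may sleep; they take
 * driver_lock and adjust the regulator and bus votes. SETRATE_PC and
 * SETRATE_SWFI are issued from atomic contexts and only switch clock sources.
 */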
/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
				  enum setrate_reason reason)
{
	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
	const struct l2_level *tgt_l2_l;
	const struct acpu_level *tgt;
	int vdd_mem, vdd_dig, vdd_core;
	unsigned long flags;
	int rc = 0;

	if (cpu > num_possible_cpus()) {
		rc = -EINVAL;
		goto out;
	}

	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_lock(&driver_lock);

	strt_acpu_s = drv.scalable[cpu].cur_speed;

	/* Return early if rate didn't change. */
	if (rate == strt_acpu_s->khz)
		goto out;

	/* Find target frequency. */
	for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
		if (tgt->speed.khz == rate) {
			tgt_acpu_s = &tgt->speed;
			break;
		}
	}
	if (tgt->speed.khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Calculate voltage requirements for the current CPU. */
	vdd_mem = calculate_vdd_mem(tgt);
	vdd_dig = calculate_vdd_dig(tgt);
	vdd_core = calculate_vdd_core(tgt);

	/* Increase VDD levels if needed. */
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
		rc = increase_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
		if (rc)
			goto out;
	}

	pr_debug("Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
		 cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

	/* Set the new CPU speed. */
	set_speed(&drv.scalable[cpu], tgt_acpu_s);

	/*
	 * Update the L2 vote and apply the rate change. A spinlock is
	 * necessary to ensure L2 rate is calculated and set atomically
	 * with the CPU frequency, even if acpuclk_krait_set_rate() is
	 * called from an atomic context and the driver_lock mutex is not
	 * acquired.
	 */
	spin_lock_irqsave(&l2_lock, flags);
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	set_speed(&drv.scalable[L2], &tgt_l2_l->speed);
	spin_unlock_irqrestore(&l2_lock, flags);

	/* Nothing else to do for power collapse or SWFI. */
	if (reason == SETRATE_PC || reason == SETRATE_SWFI)
		goto out;

	/* Update bus bandwidth request. */
	set_bus_bw(tgt_l2_l->bw_level);

	/* Drop VDD levels if we can. */
	decrease_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);

	pr_debug("ACPU%d speed change complete\n", cpu);

out:
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_unlock(&driver_lock);
	return rc;
}

/* Initialize a HFPLL at a given rate and enable it. */
static void __init hfpll_init(struct scalable *sc,
			      const struct core_speed *tgt_s)
{
	pr_debug("Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, true);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(sc->hfpll_data->config_val,
		       sc->hfpll_base + sc->hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + sc->hfpll_data->n_offset);

	/* Set an initial rate and enable the PLL. */
	hfpll_set_rate(sc, tgt_s);
	hfpll_enable(sc, false);
}

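/*
 * Look up an RPM-managed regulator by name, apply an initial voltage vote
 * and optionally enable it. Failures are fatal since the CPU cannot be
 * clocked safely without its supplies.
 */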
static void __init rpm_regulator_init(struct scalable *sc, enum vregs vreg,
				      int vdd, bool enable)
{
	int ret;

	if (!sc->vreg[vreg].name)
		return;

	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
						   sc->vreg[vreg].name);
	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%ld)\n",
			sc->vreg[vreg].name,
			PTR_ERR(sc->vreg[vreg].rpm_reg));
		BUG();
	}

	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
					sc->vreg[vreg].max_vdd);
	if (ret) {
		dev_err(drv.dev, "%s initialization failed (%d)\n",
			sc->vreg[vreg].name, ret);
		BUG();
	}
	sc->vreg[vreg].cur_vdd = vdd;

	if (enable)
		enable_rpm_vreg(&sc->vreg[vreg]);
}

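/*
 * The L2's HFPLL supplies are set up first, then each CPU's memory, digital
 * and HFPLL supplies via the RPM, and finally the per-CPU Krait core rail,
 * which is set to the initial frequency's voltage and enabled.
 */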
/* Voltage regulator initialization. */
static void __init regulator_init(struct device *dev,
				  const struct acpu_level *lvl)
{
	int cpu, ret;
	struct scalable *sc;
	int vdd_mem, vdd_dig, vdd_core;

	vdd_mem = calculate_vdd_mem(lvl);
	vdd_dig = calculate_vdd_dig(lvl);

	rpm_regulator_init(&drv.scalable[L2], VREG_HFPLL_A,
			   drv.scalable[L2].vreg[VREG_HFPLL_A].max_vdd, false);
	rpm_regulator_init(&drv.scalable[L2], VREG_HFPLL_B,
			   drv.scalable[L2].vreg[VREG_HFPLL_B].max_vdd, false);

	for_each_possible_cpu(cpu) {
		sc = &drv.scalable[cpu];

		rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
		rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
		rpm_regulator_init(sc, VREG_HFPLL_A,
				   sc->vreg[VREG_HFPLL_A].max_vdd, false);
		rpm_regulator_init(sc, VREG_HFPLL_B,
				   sc->vreg[VREG_HFPLL_B].max_vdd, false);

		/* Setup Krait CPU regulators and initial core voltage. */
		sc->vreg[VREG_CORE].reg = regulator_get(dev,
					sc->vreg[VREG_CORE].name);
		if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
			dev_err(drv.dev, "regulator_get(%s) failed (%ld)\n",
				sc->vreg[VREG_CORE].name,
				PTR_ERR(sc->vreg[VREG_CORE].reg));
			BUG();
		}
		vdd_core = calculate_vdd_core(lvl);
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
					    sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			BUG();
		}
		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 sc->vreg[VREG_CORE].peak_ua);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			BUG();
		}
		ret = regulator_enable(sc->vreg[VREG_CORE].reg);
		if (ret) {
			dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			BUG();
		}
	}
}

/* Set initial rate for a given core. */
static void __init init_clock_sources(struct scalable *sc,
				      const struct core_speed *tgt_s)
{
	u32 regval;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_addr)
		writel_relaxed(sc->aux_clk_sel, sc->aux_clk_sel_addr);

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Switch to the target clock source. */
	set_sec_clk_src(sc, tgt_s->sec_src_sel);
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;
}

static void __init per_cpu_init(int cpu, const struct acpu_level *max_level)
{
	drv.scalable[cpu].hfpll_base =
		ioremap(drv.scalable[cpu].hfpll_phys_base, SZ_32);
	BUG_ON(!drv.scalable[cpu].hfpll_base);

	init_clock_sources(&drv.scalable[cpu], &max_level->speed);
	drv.scalable[cpu].l2_vote = max_level->l2_level;
}

/* Register with bus driver. */
static void __init bus_init(struct msm_bus_scale_pdata *bus_scale_data,
			    unsigned int init_bw)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(bus_scale_data);
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, init_bw);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

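/*
 * Build a cpufreq table per CPU from acpu_freq_tbl, keeping only the levels
 * flagged use_for_scaling and terminating each table with CPUFREQ_TABLE_END.
 */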
static void __init cpufreq_table_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;
		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table); i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
			 cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
#else
static void __init cpufreq_table_init(void) {}
#endif

#define HOT_UNPLUG_KHZ STBY_KHZ
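/*
 * CPU hotplug callback: when a core goes down, remember its rate, drop it to
 * the standby frequency and remove its core-regulator mode vote; when it is
 * brought back up, restore the mode vote and the previously saved rate.
 */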
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	int rc, cpu = (int)hcpu;
	struct scalable *sc = &drv.scalable[cpu];

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		break;
	case CPU_UP_PREPARE:
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						sc->vreg[VREG_CORE].peak_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};

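/*
 * Pick the PVS frequency table for this chip. The speed bin is read from the
 * PTE efuse: bits [12:10] hold the primary value, and 0x7 there means the
 * redundant copy in bits [15:13] is used instead. Unknown bins fall back to
 * the slow table.
 */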
static const struct acpu_level __init *select_freq_plan(
		const struct acpu_level *const *pvs_tbl, u32 qfprom_phys)
{
	const struct acpu_level *l, *max_acpu_level = NULL;
	void __iomem *qfprom_base;
	u32 pte_efuse, pvs, tbl_idx;
	char *pvs_names[] = { "Slow", "Nominal", "Fast", "Unknown" };

	qfprom_base = ioremap(qfprom_phys, SZ_256);
	/* Select frequency tables. */
	if (qfprom_base) {
		pte_efuse = readl_relaxed(qfprom_base + PTE_EFUSE);
		pvs = (pte_efuse >> 10) & 0x7;
		iounmap(qfprom_base);
		if (pvs == 0x7)
			pvs = (pte_efuse >> 13) & 0x7;

		switch (pvs) {
		case 0x0:
		case 0x7:
			tbl_idx = PVS_SLOW;
			break;
		case 0x1:
			tbl_idx = PVS_NOMINAL;
			break;
		case 0x3:
			tbl_idx = PVS_FAST;
			break;
		default:
			tbl_idx = PVS_UNKNOWN;
			break;
		}
	} else {
		tbl_idx = PVS_UNKNOWN;
		dev_err(drv.dev, "Unable to map QFPROM base\n");
	}
	dev_info(drv.dev, "ACPU PVS: %s\n", pvs_names[tbl_idx]);
	if (tbl_idx == PVS_UNKNOWN) {
		tbl_idx = PVS_SLOW;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %s\n",
			 pvs_names[tbl_idx]);
	}
	drv.acpu_freq_tbl = pvs_tbl[tbl_idx];

	/* Find the max supported scaling frequency. */
	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (l->use_for_scaling)
			max_acpu_level = l;
	BUG_ON(!max_acpu_level);
	dev_info(drv.dev, "Max ACPU freq: %lu KHz\n",
		 max_acpu_level->speed.khz);

	return max_acpu_level;
}

static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
	.power_collapse_khz = STBY_KHZ,
	.wait_for_irq_khz = STBY_KHZ,
};

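/*
 * Probe-time setup: map the L2 HFPLL, select the PVS frequency plan, bring up
 * regulators and the bus vote, initialize the L2 and each online CPU's clock
 * sources at the highest supported level, then register with the acpuclk
 * layer, cpufreq and the CPU hotplug notifier chain.
 */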
int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	const struct acpu_level *max_acpu_level;
	int cpu;

	drv.scalable = params->scalable;
	drv.l2_freq_tbl = params->l2_freq_tbl;
	drv.dev = dev;

	drv.scalable[L2].hfpll_base =
		ioremap(drv.scalable[L2].hfpll_phys_base, SZ_32);
	BUG_ON(!drv.scalable[L2].hfpll_base);

	max_acpu_level = select_freq_plan(params->pvs_acpu_freq_tbl,
					  params->qfprom_phys_base);
	regulator_init(dev, max_acpu_level);
	bus_init(params->bus_scale_data, max_acpu_level->l2_level->bw_level);
	init_clock_sources(&drv.scalable[L2], &max_acpu_level->l2_level->speed);
	for_each_online_cpu(cpu)
		per_cpu_init(cpu, max_acpu_level);

	cpufreq_table_init();

	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	return 0;
}