/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>

#include "acpuclock.h"
#include "acpuclock-krait.h"
#include "avs.h"

/* MUX source selects. */
#define PRI_SRC_SEL_SEC_SRC	0
#define PRI_SRC_SEL_HFPLL	1
#define PRI_SRC_SEL_HFPLL_DIV2	2

#define SECCLKAGD		BIT(4)

static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);

static struct drv_data {
	struct acpu_level *acpu_freq_tbl;
	const struct l2_level *l2_freq_tbl;
	struct scalable *scalable;
	struct hfpll_data *hfpll_data;
	u32 bus_perf_client;
	struct msm_bus_scale_pdata *bus_scale;
	int boost_uv;
	struct device *dev;
} drv;

static unsigned long acpuclk_krait_get_rate(int cpu)
{
	return drv.scalable[cpu].cur_speed->khz;
}

/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

81/* Select a source on the secondary MUX. */
Matt Wagantalla133dbf2012-09-27 19:56:57 -070082static void __cpuinit set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
Matt Wagantalle9b715a2012-01-04 18:16:14 -080083{
84 u32 regval;
85
Matt Wagantall7c705e72012-09-25 12:47:24 -070086 /* 8064 Errata: disable sec_src clock gating during switch. */
Matt Wagantalle9b715a2012-01-04 18:16:14 -080087 regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
Matt Wagantall7c705e72012-09-25 12:47:24 -070088 regval |= SECCLKAGD;
89 set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
90
91 /* Program the MUX */
Matt Wagantalle9b715a2012-01-04 18:16:14 -080092 regval &= ~(0x3 << 2);
93 regval |= ((sec_src_sel & 0x3) << 2);
94 set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
Matt Wagantall7c705e72012-09-25 12:47:24 -070095
96 /* 8064 Errata: re-enabled sec_src clock gating. */
97 regval &= ~SECCLKAGD;
98 set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
99
Matt Wagantalle9b715a2012-01-04 18:16:14 -0800100 /* Wait for switch to complete. */
101 mb();
102 udelay(1);
103}

static int enable_rpm_vreg(struct vreg *vreg)
{
	int ret = 0;

	if (vreg->rpm_reg) {
		ret = rpm_regulator_enable(vreg->rpm_reg);
		if (ret)
			dev_err(drv.dev, "%s regulator enable failed (%d)\n",
				vreg->name, ret);
	}

	return ret;
}

static void disable_rpm_vreg(struct vreg *vreg)
{
	int rc;

	if (vreg->rpm_reg) {
		rc = rpm_regulator_disable(vreg->rpm_reg);
		if (rc)
			dev_err(drv.dev, "%s regulator disable failed (%d)\n",
				vreg->name, rc);
	}
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
	if (!skip_regulators) {
		/* Enable regulators required by the HFPLL. */
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
	}

	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/* Wait for PLL to lock. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
}

/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
	}
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	void __iomem *base = sc->hfpll_base;
	u32 regval;

	writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);

	if (drv.hfpll_data->has_user_reg) {
		regval = readl_relaxed(base + drv.hfpll_data->user_offset);
		if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
			regval &= ~drv.hfpll_data->user_vco_mask;
		else
			regval |= drv.hfpll_data->user_vco_mask;
		writel_relaxed(regval, base + drv.hfpll_data->user_offset);
	}
}

/* Return the L2 speed that should be applied. */
static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l)
{
	unsigned int new_l = 0;
	int cpu;

	/* Find max L2 speed vote. */
	sc->l2_vote = vote_l;
	for_each_present_cpu(cpu)
		new_l = max(new_l, drv.scalable[cpu].l2_vote);

	return new_l;
}

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	/* Update bandwidth if request has changed. This may sleep. */
	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
	if (ret)
		dev_err(drv.dev, "bandwidth request failed (%d)\n", ret);
}

/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s)
{
	const struct core_speed *strt_s = sc->cur_speed;

	if (strt_s == tgt_s)
		return;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Re-program HFPLL. */
		hfpll_disable(sc, true);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, true);

		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, false);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, false);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	}

	sc->cur_speed = tgt_s;
}

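/* Voltage and current requirements computed for a target ACPU level. */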
struct vdd_data {
	int vdd_mem;
	int vdd_dig;
	int vdd_core;
	int ua_core;
};

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, struct vdd_data *data,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc;

	/*
	 * Increase vdd_mem active-set before vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}

	/* Increase vdd_dig active-set vote. */
	if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/* Increase current request. */
	if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						data->ua_core);
		if (rc < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/*
	 * Update per-CPU core voltage. Don't do this for the hotplug path for
	 * which it should already be correct. Attempting to set it is bad
	 * because we don't know what CPU we are running on at this point, but
	 * the CPU regulator API requires we call it from the affected CPU.
	 */
	if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	return 0;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, struct vdd_data *data,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	/* Decrease current request. */
	if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 data->ua_core);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}
}

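/* vdd_mem tracks the L2 level that the target ACPU level maps to. */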
static int calculate_vdd_mem(const struct acpu_level *tgt)
{
	return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
}

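/* Return the vdd_dig level a clock source needs, based on its HFPLL L-value. */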
static int get_src_dig(const struct core_speed *s)
{
	const int *hfpll_vdd = drv.hfpll_data->vdd;
	const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
	const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;

	if (s->src != HFPLL)
		return hfpll_vdd[HFPLL_VDD_NONE];
	else if (s->pll_l_val > nom_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_HIGH];
	else if (s->pll_l_val > low_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_NOM];
	else
		return hfpll_vdd[HFPLL_VDD_LOW];
}

static int calculate_vdd_dig(const struct acpu_level *tgt)
{
	int l2_pll_vdd_dig, cpu_pll_vdd_dig;

	l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
	cpu_pll_vdd_dig = get_src_dig(&tgt->speed);

	return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
		   max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
}

static bool enable_boost = true;
module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);

static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}

/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
				  enum setrate_reason reason)
{
	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
	const struct acpu_level *tgt;
	int tgt_l2_l;
	struct vdd_data vdd_data;
	unsigned long flags;
	int rc = 0;

	if (cpu > num_possible_cpus())
		return -EINVAL;

	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_lock(&driver_lock);

	strt_acpu_s = drv.scalable[cpu].cur_speed;

	/* Return early if rate didn't change. */
	if (rate == strt_acpu_s->khz)
		goto out;

	/* Find target frequency. */
	for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
		if (tgt->speed.khz == rate) {
			tgt_acpu_s = &tgt->speed;
			break;
		}
	}
	if (tgt->speed.khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Calculate voltage requirements for the current CPU. */
	vdd_data.vdd_mem = calculate_vdd_mem(tgt);
	vdd_data.vdd_dig = calculate_vdd_dig(tgt);
	vdd_data.vdd_core = calculate_vdd_core(tgt);
	vdd_data.ua_core = tgt->ua_core;

	/* Disable AVS before voltage switch */
	if (reason == SETRATE_CPUFREQ && drv.scalable[cpu].avs_enabled) {
		AVS_DISABLE(cpu);
		drv.scalable[cpu].avs_enabled = false;
	}

	/* Increase VDD levels if needed. */
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
		rc = increase_vdd(cpu, &vdd_data, reason);
		if (rc)
			goto out;
	}

	dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
		cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

	/* Set the new CPU speed. */
	set_speed(&drv.scalable[cpu], tgt_acpu_s);

	/*
	 * Update the L2 vote and apply the rate change. A spinlock is
	 * necessary to ensure L2 rate is calculated and set atomically
	 * with the CPU frequency, even if acpuclk_krait_set_rate() is
	 * called from an atomic context and the driver_lock mutex is not
	 * acquired.
	 */
	spin_lock_irqsave(&l2_lock, flags);
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	set_speed(&drv.scalable[L2], &drv.l2_freq_tbl[tgt_l2_l].speed);
	spin_unlock_irqrestore(&l2_lock, flags);

	/* Nothing else to do for power collapse or SWFI. */
	if (reason == SETRATE_PC || reason == SETRATE_SWFI)
		goto out;

	/* Update bus bandwidth request. */
	set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);

	/* Drop VDD levels if we can. */
	decrease_vdd(cpu, &vdd_data, reason);

	/* Re-enable AVS */
	if (reason == SETRATE_CPUFREQ && tgt->avsdscr_setting) {
		AVS_ENABLE(cpu, tgt->avsdscr_setting);
		drv.scalable[cpu].avs_enabled = true;
	}

	dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);

out:
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_unlock(&driver_lock);
	return rc;
}

static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
};

/* Initialize a HFPLL at a given rate and enable it. */
static void __cpuinit hfpll_init(struct scalable *sc,
				 const struct core_speed *tgt_s)
{
	dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, true);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(drv.hfpll_data->config_val,
		       sc->hfpll_base + drv.hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
	if (drv.hfpll_data->has_user_reg)
		writel_relaxed(drv.hfpll_data->user_val,
			       sc->hfpll_base + drv.hfpll_data->user_offset);

	/* Program droop controller, if supported */
	if (drv.hfpll_data->has_droop_ctl)
		writel_relaxed(drv.hfpll_data->droop_val,
			       sc->hfpll_base + drv.hfpll_data->droop_offset);

	/* Set an initial rate and enable the PLL. */
	hfpll_set_rate(sc, tgt_s);
	hfpll_enable(sc, false);
}

static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
					int vdd, bool enable)
{
	int ret;

	if (!sc->vreg[vreg].name)
		return 0;

	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
						   sc->vreg[vreg].name);
	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
		ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_get;
	}

	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
					sc->vreg[vreg].max_vdd);
	if (ret) {
		dev_err(drv.dev, "%s initialization failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_conf;
	}
	sc->vreg[vreg].cur_vdd = vdd;

	if (enable) {
		ret = enable_rpm_vreg(&sc->vreg[vreg]);
		if (ret)
			goto err_conf;
	}

	return 0;

err_conf:
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
err_get:
	return ret;
}

static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
					    enum vregs vreg)
{
	if (!sc->vreg[vreg].rpm_reg)
		return;

	disable_rpm_vreg(&sc->vreg[vreg]);
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
}

/* Voltage regulator initialization. */
static int __cpuinit regulator_init(struct scalable *sc,
				    const struct acpu_level *acpu_level)
{
	int ret, vdd_mem, vdd_dig, vdd_core;

	vdd_mem = calculate_vdd_mem(acpu_level);
	ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
	if (ret)
		goto err_mem;

	vdd_dig = calculate_vdd_dig(acpu_level);
	ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
	if (ret)
		goto err_dig;

	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
				 sc->vreg[VREG_HFPLL_A].max_vdd, false);
	if (ret)
		goto err_hfpll_a;
	ret = rpm_regulator_init(sc, VREG_HFPLL_B,
				 sc->vreg[VREG_HFPLL_B].max_vdd, false);
	if (ret)
		goto err_hfpll_b;

	/* Setup Krait CPU regulators and initial core voltage. */
	sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
						sc->vreg[VREG_CORE].name);
	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
		ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
		dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_get;
	}
	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
					 acpu_level->ua_core);
	if (ret < 0) {
		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
	vdd_core = calculate_vdd_core(acpu_level);
	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
				    sc->vreg[VREG_CORE].max_vdd);
	if (ret) {
		dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
	if (ret) {
		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}

	return 0;

err_core_conf:
	regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
err_hfpll_b:
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
err_hfpll_a:
	rpm_regulator_cleanup(sc, VREG_DIG);
err_dig:
	rpm_regulator_cleanup(sc, VREG_MEM);
err_mem:
	return ret;
}

static void __cpuinit regulator_cleanup(struct scalable *sc)
{
	regulator_disable(sc->vreg[VREG_CORE].reg);
	regulator_put(sc->vreg[VREG_CORE].reg);
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
	rpm_regulator_cleanup(sc, VREG_DIG);
	rpm_regulator_cleanup(sc, VREG_MEM);
}

/* Set initial rate for a given core. */
static int __cpuinit init_clock_sources(struct scalable *sc,
					const struct core_speed *tgt_s)
{
	u32 regval;
	void __iomem *aux_reg;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_phys) {
		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
		if (!aux_reg)
			return -ENOMEM;
		writel_relaxed(sc->aux_clk_sel, aux_reg);
		iounmap(aux_reg);
	}

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, sc->sec_clk_sel);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Switch to the target clock source. */
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;

	return 0;
}

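/* Read back the current MUX selection and HFPLL L-value from hardware. */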
static void __cpuinit fill_cur_core_speed(struct core_speed *s,
					  struct scalable *sc)
{
	s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
	s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
}

static bool __cpuinit speed_equal(const struct core_speed *s1,
				  const struct core_speed *s2)
{
	return (s1->pri_src_sel == s2->pri_src_sel &&
		s1->pll_l_val == s2->pll_l_val);
}

static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct l2_level __init *find_cur_l2_level(void)
{
	struct scalable *sc = &drv.scalable[L2];
	const struct l2_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct acpu_level __cpuinit *find_min_acpu_level(void)
{
	struct acpu_level *l;

	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (l->use_for_scaling)
			return l;

	return NULL;
}

static int __cpuinit per_cpu_init(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *acpu_level;
	int ret;

	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
	if (!sc->hfpll_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	acpu_level = find_cur_acpu_level(cpu);
	if (!acpu_level) {
		acpu_level = find_min_acpu_level();
		if (!acpu_level) {
			ret = -ENODEV;
			goto err_table;
		}
		dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
			cpu, acpu_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
			acpu_level->speed.khz);
	}

	ret = regulator_init(sc, acpu_level);
	if (ret)
		goto err_regulators;

	ret = init_clock_sources(sc, &acpu_level->speed);
	if (ret)
		goto err_clocks;

	sc->l2_vote = acpu_level->l2_level;
	sc->initialized = true;

	return 0;

err_clocks:
	regulator_cleanup(sc);
err_regulators:
err_table:
	iounmap(sc->hfpll_base);
err_ioremap:
	return ret;
}

/* Register with bus driver. */
static void __init bus_init(const struct l2_level *l2_level)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
			l2_level->bw_level);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

static void __init cpufreq_table_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;
		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table); i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
			cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
#else
static void __init cpufreq_table_init(void) {}
#endif

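/*
 * CPU hotplug callback: drop a dying core to the power-collapse rate and
 * restore its previous rate (and current vote) when it is brought back up.
 */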
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	int rc, cpu = (int)hcpu;
	struct scalable *sc = &drv.scalable[cpu];
	unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		break;
	case CPU_UP_PREPARE:
		if (!sc->initialized) {
			rc = per_cpu_init(cpu);
			if (rc)
				return NOTIFY_BAD;
			break;
		}
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
					sc->vreg[VREG_CORE].cur_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};

static const int krait_needs_vmin(void)
{
	switch (read_cpuid_id()) {
	case 0x511F04D0: /* KR28M2A20 */
	case 0x511F04D1: /* KR28M2A21 */
	case 0x510F06F0: /* KR28M4A10 */
		return 1;
	default:
		return 0;
	};
}

static void krait_apply_vmin(struct acpu_level *tbl)
{
	for (; tbl->speed.khz != 0; tbl++) {
		if (tbl->vdd_core < 1150000)
			tbl->vdd_core = 1150000;
		tbl->avsdscr_setting = 0;
	}
}

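/*
 * Speed and PVS bins are fused in two locations; fall back to the redundant
 * copy if the primary copy reads as unprogrammed (all ones).
 */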
static int __init get_speed_bin(u32 pte_efuse)
{
	uint32_t speed_bin;

	speed_bin = pte_efuse & 0xF;
	if (speed_bin == 0xF)
		speed_bin = (pte_efuse >> 4) & 0xF;

	if (speed_bin == 0xF) {
		speed_bin = 0;
		dev_warn(drv.dev, "SPEED BIN: Defaulting to %d\n", speed_bin);
	} else {
		dev_info(drv.dev, "SPEED BIN: %d\n", speed_bin);
	}

	return speed_bin;
}

static int __init get_pvs_bin(u32 pte_efuse)
{
	uint32_t pvs_bin;

	pvs_bin = (pte_efuse >> 10) & 0x7;
	if (pvs_bin == 0x7)
		pvs_bin = (pte_efuse >> 13) & 0x7;

	if (pvs_bin == 0x7) {
		pvs_bin = 0;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %d\n", pvs_bin);
	} else {
		dev_info(drv.dev, "ACPU PVS: %d\n", pvs_bin);
	}

	return pvs_bin;
}

static struct pvs_table * __init select_freq_plan(u32 pte_efuse_phys,
			struct pvs_table (*pvs_tables)[NUM_PVS])
{
	void __iomem *pte_efuse;
	u32 pte_efuse_val, tbl_idx, bin_idx;

	pte_efuse = ioremap(pte_efuse_phys, 4);
	if (!pte_efuse) {
		dev_err(drv.dev, "Unable to map QFPROM base\n");
		return NULL;
	}

	pte_efuse_val = readl_relaxed(pte_efuse);
	iounmap(pte_efuse);

	/* Select frequency tables. */
	bin_idx = get_speed_bin(pte_efuse_val);
	tbl_idx = get_pvs_bin(pte_efuse_val);

	return &pvs_tables[bin_idx][tbl_idx];
}

static void __init drv_data_init(struct device *dev,
				 const struct acpuclk_krait_params *params)
{
	struct pvs_table *pvs;

	drv.dev = dev;
	drv.scalable = kmemdup(params->scalable, params->scalable_size,
			       GFP_KERNEL);
	BUG_ON(!drv.scalable);

	drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
				 GFP_KERNEL);
	BUG_ON(!drv.hfpll_data);

	drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
				  GFP_KERNEL);
	BUG_ON(!drv.l2_freq_tbl);

	drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
				GFP_KERNEL);
	BUG_ON(!drv.bus_scale);
	drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
		drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
		GFP_KERNEL);
	BUG_ON(!drv.bus_scale->usecase);

	pvs = select_freq_plan(params->pte_efuse_phys, params->pvs_tables);
	BUG_ON(!pvs->table);

	drv.acpu_freq_tbl = kmemdup(pvs->table, pvs->size, GFP_KERNEL);
	BUG_ON(!drv.acpu_freq_tbl);
	drv.boost_uv = pvs->boost_uv;

	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}

static void __init hw_init(void)
{
	struct scalable *l2 = &drv.scalable[L2];
	const struct l2_level *l2_level;
	int cpu, rc;

	if (krait_needs_vmin())
		krait_apply_vmin(drv.acpu_freq_tbl);

	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
	BUG_ON(!l2->hfpll_base);

	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
				l2->vreg[VREG_HFPLL_A].max_vdd, false);
	BUG_ON(rc);
	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
				l2->vreg[VREG_HFPLL_B].max_vdd, false);
	BUG_ON(rc);

	l2_level = find_cur_l2_level();
	if (!l2_level) {
		l2_level = drv.l2_freq_tbl;
		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
			l2_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
			l2_level->speed.khz);
	}

	rc = init_clock_sources(l2, &l2_level->speed);
	BUG_ON(rc);

	for_each_online_cpu(cpu) {
		rc = per_cpu_init(cpu);
		BUG_ON(rc);
	}

	bus_init(l2_level);
}

int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	drv_data_init(dev, params);
	hw_init();

	cpufreq_table_init();
	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	return 0;
}