/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>

#include "acpuclock.h"
#include "acpuclock-krait.h"
#include "avs.h"

/* MUX source selects. */
#define PRI_SRC_SEL_SEC_SRC	0
#define PRI_SRC_SEL_HFPLL	1
#define PRI_SRC_SEL_HFPLL_DIV2	2
#define SEC_SRC_SEL_L2PLL	1
#define SEC_SRC_SEL_AUX		2

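/*
 * Note: per set_pri_clk_src()/set_sec_clk_src() below, both selects live in
 * the same indirectly-accessed L2 CPMR register: bits [1:0] pick the primary
 * source and bits [3:2] pick the secondary source. init_clock_sources() also
 * clears bits [7:6] to set the HFPLL div-2 path to divide-by-2.
 */
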
static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);

static struct drv_data {
	struct acpu_level *acpu_freq_tbl;
	const struct l2_level *l2_freq_tbl;
	struct scalable *scalable;
	struct hfpll_data *hfpll_data;
	u32 bus_perf_client;
	struct msm_bus_scale_pdata *bus_scale;
	int boost_uv;
	struct device *dev;
} drv;

static unsigned long acpuclk_krait_get_rate(int cpu)
{
	return drv.scalable[cpu].cur_speed->khz;
}

/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

/* Select a source on the secondary MUX. */
static void set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

static int enable_rpm_vreg(struct vreg *vreg)
{
	int ret = 0;

	if (vreg->rpm_reg) {
		ret = rpm_regulator_enable(vreg->rpm_reg);
		if (ret)
			dev_err(drv.dev, "%s regulator enable failed (%d)\n",
				vreg->name, ret);
	}

	return ret;
}

static void disable_rpm_vreg(struct vreg *vreg)
{
	int rc;

	if (vreg->rpm_reg) {
		rc = rpm_regulator_disable(vreg->rpm_reg);
		if (rc)
			dev_err(drv.dev, "%s regulator disable failed (%d)\n",
				vreg->name, rc);
	}
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
	if (!skip_regulators) {
		/* Enable regulators required by the HFPLL. */
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
	}

	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/* Wait for PLL to lock. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
}

/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
	}
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	void __iomem *base = sc->hfpll_base;
	u32 regval;

	writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);

	if (drv.hfpll_data->has_user_reg) {
		regval = readl_relaxed(base + drv.hfpll_data->user_offset);
		if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
			regval &= ~drv.hfpll_data->user_vco_mask;
		else
			regval |= drv.hfpll_data->user_vco_mask;
		writel_relaxed(regval, base + drv.hfpll_data->user_offset);
	}
}
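
/*
 * Note: with M programmed to 0 and N to 1 in hfpll_init(), the PLL runs in
 * integer mode and its output is the L value times the reference clock
 * (assumption: many Krait targets use a 27 MHz HFPLL reference, so L = 40
 * would give roughly 1.08 GHz; the actual reference is board-specific and
 * comes from the acpuclk_krait_params tables, not from this file).
 */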

/* Return the L2 speed that should be applied. */
static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l)
{
	unsigned int new_l = 0;
	int cpu;

	/* Find max L2 speed vote. */
	sc->l2_vote = vote_l;
	for_each_present_cpu(cpu)
		new_l = max(new_l, drv.scalable[cpu].l2_vote);

	return new_l;
}

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	/* Update bandwidth if request has changed. This may sleep. */
	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
	if (ret)
		dev_err(drv.dev, "bandwidth request failed (%d)\n", ret);
}

/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s)
{
	const struct core_speed *strt_s = sc->cur_speed;

	if (strt_s == tgt_s)
		return;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Re-program HFPLL. */
		hfpll_disable(sc, true);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, true);

		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		set_sec_clk_src(sc, tgt_s->sec_src_sel);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, false);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, false);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else {
		set_sec_clk_src(sc, tgt_s->sec_src_sel);
	}

	sc->cur_speed = tgt_s;
}
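
/*
 * Summary of the cases above: HFPLL->HFPLL parks the core on the AUX source
 * while the PLL is re-programmed; HFPLL->non-HFPLL switches away first and
 * then powers the PLL down; non-HFPLL->HFPLL programs and locks the PLL
 * before switching to it; non-HFPLL->non-HFPLL only needs the secondary MUX.
 * The regulator votes are skipped (skip_regulators == true) in the first
 * case because the PLL is only off momentarily.
 */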

struct vdd_data {
	int vdd_mem;
	int vdd_dig;
	int vdd_core;
	int ua_core;
};

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, struct vdd_data *data,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc;

	/*
	 * Increase vdd_mem active-set before vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}

	/* Increase vdd_dig active-set vote. */
	if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/* Increase current request. */
	if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						data->ua_core);
		if (rc < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/*
	 * Update per-CPU core voltage. Don't do this for the hotplug path for
	 * which it should already be correct. Attempting to set it is bad
	 * because we don't know what CPU we are running on at this point, but
	 * the CPU regulator API requires we call it from the affected CPU.
	 */
	if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	return 0;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, struct vdd_data *data,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	/* Decrease current request. */
	if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 data->ua_core);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}
}
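
/*
 * Note the deliberate symmetry: increase_vdd() raises rails in the order
 * mem -> dig -> core current -> core voltage, while decrease_vdd() walks
 * the same rails in reverse, so the vdd_mem >= vdd_dig constraint holds at
 * every intermediate step in both directions.
 */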

static int calculate_vdd_mem(const struct acpu_level *tgt)
{
	return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
}

static int get_src_dig(const struct core_speed *s)
{
	const int *hfpll_vdd = drv.hfpll_data->vdd;
	const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
	const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;

	if (s->src != HFPLL)
		return hfpll_vdd[HFPLL_VDD_NONE];
	else if (s->pll_l_val > nom_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_HIGH];
	else if (s->pll_l_val > low_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_NOM];
	else
		return hfpll_vdd[HFPLL_VDD_LOW];
}

static int calculate_vdd_dig(const struct acpu_level *tgt)
{
	int l2_pll_vdd_dig, cpu_pll_vdd_dig;

	l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
	cpu_pll_vdd_dig = get_src_dig(&tgt->speed);

	return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
		   max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
}

static bool enable_boost = true;
module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);

static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
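
/*
 * The boost margin can be toggled at runtime through the writable module
 * parameter declared above (assumption: with the usual KBUILD_MODNAME for
 * this file it would appear as /sys/module/acpuclock_krait/parameters/boost).
 * Writing 0 simply drops boost_uv from the next voltage calculation; it does
 * not rescale a voltage that has already been applied.
 */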

/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
				  enum setrate_reason reason)
{
	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
	const struct acpu_level *tgt;
	int tgt_l2_l;
	struct vdd_data vdd_data;
	unsigned long flags;
	int rc = 0;

	if (cpu >= num_possible_cpus())
		return -EINVAL;

	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_lock(&driver_lock);

	strt_acpu_s = drv.scalable[cpu].cur_speed;

	/* Return early if rate didn't change. */
	if (rate == strt_acpu_s->khz)
		goto out;

	/* Find target frequency. */
	for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
		if (tgt->speed.khz == rate) {
			tgt_acpu_s = &tgt->speed;
			break;
		}
	}
	if (tgt->speed.khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Calculate voltage requirements for the current CPU. */
	vdd_data.vdd_mem = calculate_vdd_mem(tgt);
	vdd_data.vdd_dig = calculate_vdd_dig(tgt);
	vdd_data.vdd_core = calculate_vdd_core(tgt);
	vdd_data.ua_core = tgt->ua_core;

	/* Disable AVS before voltage switch. */
	if (reason == SETRATE_CPUFREQ && drv.scalable[cpu].avs_enabled) {
		AVS_DISABLE(cpu);
		drv.scalable[cpu].avs_enabled = false;
	}

	/* Increase VDD levels if needed. */
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
		rc = increase_vdd(cpu, &vdd_data, reason);
		if (rc)
			goto out;
	}

	dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
		cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

	/* Set the new CPU speed. */
	set_speed(&drv.scalable[cpu], tgt_acpu_s);

	/*
	 * Update the L2 vote and apply the rate change. A spinlock is
	 * necessary to ensure L2 rate is calculated and set atomically
	 * with the CPU frequency, even if acpuclk_krait_set_rate() is
	 * called from an atomic context and the driver_lock mutex is not
	 * acquired.
	 */
	spin_lock_irqsave(&l2_lock, flags);
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	set_speed(&drv.scalable[L2], &drv.l2_freq_tbl[tgt_l2_l].speed);
	spin_unlock_irqrestore(&l2_lock, flags);

	/* Nothing else to do for power collapse or SWFI. */
	if (reason == SETRATE_PC || reason == SETRATE_SWFI)
		goto out;

	/* Update bus bandwidth request. */
	set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);

	/* Drop VDD levels if we can. */
	decrease_vdd(cpu, &vdd_data, reason);

	/* Re-enable AVS. */
	if (reason == SETRATE_CPUFREQ && tgt->avsdscr_setting) {
		AVS_ENABLE(cpu, tgt->avsdscr_setting);
		drv.scalable[cpu].avs_enabled = true;
	}

	dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);

out:
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_unlock(&driver_lock);
	return rc;
}
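
/*
 * Overall set_rate sequence: disable AVS, raise voltages for the target
 * level, switch the CPU clock, update the shared L2 vote/speed under
 * l2_lock, then lower voltages and re-enable AVS only once the switch is
 * done. Power collapse (SETRATE_PC) and SWFI skip the bus-bandwidth and
 * voltage-drop work, which may sleep, because those paths run in atomic
 * context.
 */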

static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
};

/* Initialize a HFPLL at a given rate and enable it. */
static void __cpuinit hfpll_init(struct scalable *sc,
				 const struct core_speed *tgt_s)
{
	dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, true);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(drv.hfpll_data->config_val,
		       sc->hfpll_base + drv.hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
	if (drv.hfpll_data->has_user_reg)
		writel_relaxed(drv.hfpll_data->user_val,
			       sc->hfpll_base + drv.hfpll_data->user_offset);

	/* Program droop controller, if supported. */
	if (drv.hfpll_data->has_droop_ctl)
		writel_relaxed(drv.hfpll_data->droop_val,
			       sc->hfpll_base + drv.hfpll_data->droop_offset);

	/* Set an initial rate and enable the PLL. */
	hfpll_set_rate(sc, tgt_s);
	hfpll_enable(sc, false);
}

static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
					int vdd, bool enable)
{
	int ret;

	if (!sc->vreg[vreg].name)
		return 0;

	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
						   sc->vreg[vreg].name);
	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
		ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_get;
	}

	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
					sc->vreg[vreg].max_vdd);
	if (ret) {
		dev_err(drv.dev, "%s initialization failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_conf;
	}
	sc->vreg[vreg].cur_vdd = vdd;

	if (enable) {
		ret = enable_rpm_vreg(&sc->vreg[vreg]);
		if (ret)
			goto err_conf;
	}

	return 0;

err_conf:
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
err_get:
	return ret;
}

static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
					    enum vregs vreg)
{
	if (!sc->vreg[vreg].rpm_reg)
		return;

	disable_rpm_vreg(&sc->vreg[vreg]);
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
}

/* Voltage regulator initialization. */
static int __cpuinit regulator_init(struct scalable *sc,
				    const struct acpu_level *acpu_level)
{
	int ret, vdd_mem, vdd_dig, vdd_core;

	vdd_mem = calculate_vdd_mem(acpu_level);
	ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
	if (ret)
		goto err_mem;

	vdd_dig = calculate_vdd_dig(acpu_level);
	ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
	if (ret)
		goto err_dig;

	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
				 sc->vreg[VREG_HFPLL_A].max_vdd, false);
	if (ret)
		goto err_hfpll_a;
	ret = rpm_regulator_init(sc, VREG_HFPLL_B,
				 sc->vreg[VREG_HFPLL_B].max_vdd, false);
	if (ret)
		goto err_hfpll_b;

	/* Setup Krait CPU regulators and initial core voltage. */
	sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
						sc->vreg[VREG_CORE].name);
	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
		ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
		dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_get;
	}
	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
					 acpu_level->ua_core);
	if (ret < 0) {
		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
	vdd_core = calculate_vdd_core(acpu_level);
	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
				    sc->vreg[VREG_CORE].max_vdd);
	if (ret) {
		dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
	if (ret) {
		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}

	return 0;

err_core_conf:
	regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
err_hfpll_b:
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
err_hfpll_a:
	rpm_regulator_cleanup(sc, VREG_DIG);
err_dig:
	rpm_regulator_cleanup(sc, VREG_MEM);
err_mem:
	return ret;
}

static void __cpuinit regulator_cleanup(struct scalable *sc)
{
	regulator_disable(sc->vreg[VREG_CORE].reg);
	regulator_put(sc->vreg[VREG_CORE].reg);
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
	rpm_regulator_cleanup(sc, VREG_DIG);
	rpm_regulator_cleanup(sc, VREG_MEM);
}

/* Set initial rate for a given core. */
static int __cpuinit init_clock_sources(struct scalable *sc,
					const struct core_speed *tgt_s)
{
	u32 regval;
	void __iomem *aux_reg;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_phys) {
		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
		if (!aux_reg)
			return -ENOMEM;
		writel_relaxed(sc->aux_clk_sel, aux_reg);
		iounmap(aux_reg);
	}

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Switch to the target clock source. */
	set_sec_clk_src(sc, tgt_s->sec_src_sel);
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;

	return 0;
}

static void __cpuinit fill_cur_core_speed(struct core_speed *s,
					  struct scalable *sc)
{
	s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
	s->sec_src_sel = (get_l2_indirect_reg(sc->l2cpmr_iaddr) >> 2) & 0x3;
	s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
}

static bool __cpuinit speed_equal(const struct core_speed *s1,
				  const struct core_speed *s2)
{
	return (s1->pri_src_sel == s2->pri_src_sel &&
		s1->sec_src_sel == s2->sec_src_sel &&
		s1->pll_l_val == s2->pll_l_val);
}

static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct l2_level __init *find_cur_l2_level(void)
{
	struct scalable *sc = &drv.scalable[L2];
	const struct l2_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct acpu_level __cpuinit *find_min_acpu_level(void)
{
	struct acpu_level *l;

	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (l->use_for_scaling)
			return l;

	return NULL;
}
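
/*
 * The find_cur_*() helpers above read the live MUX selects and PLL L value
 * back from hardware and match them against the frequency tables, so the
 * driver can adopt whatever rate the bootloader left the core running at
 * rather than forcing an immediate switch.
 */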

static int __cpuinit per_cpu_init(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *acpu_level;
	int ret;

	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
	if (!sc->hfpll_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	acpu_level = find_cur_acpu_level(cpu);
	if (!acpu_level) {
		acpu_level = find_min_acpu_level();
		if (!acpu_level) {
			ret = -ENODEV;
			goto err_table;
		}
		dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
			cpu, acpu_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
			acpu_level->speed.khz);
	}

	ret = regulator_init(sc, acpu_level);
	if (ret)
		goto err_regulators;

	ret = init_clock_sources(sc, &acpu_level->speed);
	if (ret)
		goto err_clocks;

	sc->l2_vote = acpu_level->l2_level;
	sc->initialized = true;

	return 0;

err_clocks:
	regulator_cleanup(sc);
err_regulators:
err_table:
	iounmap(sc->hfpll_base);
err_ioremap:
	return ret;
}

/* Register with bus driver. */
static void __init bus_init(const struct l2_level *l2_level)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
						  l2_level->bw_level);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

static void __init cpufreq_table_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;
		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table); i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
			 cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
#else
static void __init cpufreq_table_init(void) {}
#endif
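
/*
 * For reference, a table built above might end up looking like this
 * (frequencies hypothetical, taken from no particular target):
 *
 *   { .index = 0, .frequency =  384000 },
 *   { .index = 1, .frequency =  918000 },
 *   { .index = 2, .frequency = 1512000 },
 *   { .index = 3, .frequency = CPUFREQ_TABLE_END },
 *
 * Only levels flagged use_for_scaling are exposed to cpufreq; the other
 * rows of acpu_freq_tbl stay internal to this driver.
 */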

static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	int rc, cpu = (int)hcpu;
	struct scalable *sc = &drv.scalable[cpu];
	unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		break;
	case CPU_UP_PREPARE:
		if (!sc->initialized) {
			rc = per_cpu_init(cpu);
			if (rc)
				return NOTIFY_BAD;
			break;
		}
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						sc->vreg[VREG_CORE].cur_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};
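
/*
 * Hotplug flow: when a CPU dies its last rate is stashed in prev_khz[], the
 * core clock is parked at the power-collapse rate and the core-rail current
 * vote is dropped to zero. CPU_UP_PREPARE either performs the deferred
 * first-time init for a core that was offline at boot, or restores the saved
 * current vote and rate before the core comes back online.
 */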

static int krait_needs_vmin(void)
{
	switch (read_cpuid_id()) {
	case 0x511F04D0: /* KR28M2A20 */
	case 0x511F04D1: /* KR28M2A21 */
	case 0x510F06F0: /* KR28M4A10 */
		return 1;
	default:
		return 0;
	}
}

static void krait_apply_vmin(struct acpu_level *tbl)
{
	for (; tbl->speed.khz != 0; tbl++) {
		if (tbl->vdd_core < 1150000)
			tbl->vdd_core = 1150000;
		tbl->avsdscr_setting = 0;
	}
}

static int __init select_freq_plan(u32 pte_efuse_phys)
{
	void __iomem *pte_efuse;
	u32 pte_efuse_val, pvs, tbl_idx;
	char *pvs_names[] = { "Slow", "Nominal", "Fast", "Faster", "Unknown" };

	pte_efuse = ioremap(pte_efuse_phys, 4);
	/* Select frequency tables. */
	if (pte_efuse) {
		pte_efuse_val = readl_relaxed(pte_efuse);
		pvs = (pte_efuse_val >> 10) & 0x7;
		iounmap(pte_efuse);
		if (pvs == 0x7)
			pvs = (pte_efuse_val >> 13) & 0x7;

		switch (pvs) {
		case 0x0:
		case 0x7:
			tbl_idx = PVS_SLOW;
			break;
		case 0x1:
			tbl_idx = PVS_NOMINAL;
			break;
		case 0x3:
			tbl_idx = PVS_FAST;
			break;
		case 0x4:
			tbl_idx = PVS_FASTER;
			break;
		default:
			tbl_idx = PVS_UNKNOWN;
			break;
		}
	} else {
		tbl_idx = PVS_UNKNOWN;
		dev_err(drv.dev, "Unable to map QFPROM base\n");
	}
	if (tbl_idx == PVS_UNKNOWN) {
		tbl_idx = PVS_SLOW;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %s\n",
			 pvs_names[tbl_idx]);
	} else {
		dev_info(drv.dev, "ACPU PVS: %s\n", pvs_names[tbl_idx]);
	}

	return tbl_idx;
}
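
/*
 * PVS fuse decoding above: bits [12:10] of the efuse word carry the primary
 * speed bin; the all-ones value 0x7 means the primary field is blown or
 * invalid, in which case the redundant copy in bits [15:13] is used instead.
 * Any value the switch does not recognize falls back to the Slow tables as a
 * safe default.
 */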

static void __init drv_data_init(struct device *dev,
				 const struct acpuclk_krait_params *params)
{
	int tbl_idx;

	drv.dev = dev;
	drv.scalable = kmemdup(params->scalable, params->scalable_size,
			       GFP_KERNEL);
	BUG_ON(!drv.scalable);

	drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
				 GFP_KERNEL);
	BUG_ON(!drv.hfpll_data);

	drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
				  GFP_KERNEL);
	BUG_ON(!drv.l2_freq_tbl);

	drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
				GFP_KERNEL);
	BUG_ON(!drv.bus_scale);
	drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
		drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
		GFP_KERNEL);
	BUG_ON(!drv.bus_scale->usecase);

	tbl_idx = select_freq_plan(params->pte_efuse_phys);
	drv.acpu_freq_tbl = kmemdup(params->pvs_tables[tbl_idx].table,
				    params->pvs_tables[tbl_idx].size,
				    GFP_KERNEL);
	BUG_ON(!drv.acpu_freq_tbl);
	drv.boost_uv = params->pvs_tables[tbl_idx].boost_uv;

	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}

static void __init hw_init(void)
{
	struct scalable *l2 = &drv.scalable[L2];
	const struct l2_level *l2_level;
	int cpu, rc;

	if (krait_needs_vmin())
		krait_apply_vmin(drv.acpu_freq_tbl);

	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
	BUG_ON(!l2->hfpll_base);

	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
				l2->vreg[VREG_HFPLL_A].max_vdd, false);
	BUG_ON(rc);
	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
				l2->vreg[VREG_HFPLL_B].max_vdd, false);
	BUG_ON(rc);

	l2_level = find_cur_l2_level();
	if (!l2_level) {
		l2_level = drv.l2_freq_tbl;
		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
			l2_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
			l2_level->speed.khz);
	}

	rc = init_clock_sources(l2, &l2_level->speed);
	BUG_ON(rc);

	for_each_online_cpu(cpu) {
		rc = per_cpu_init(cpu);
		BUG_ON(rc);
	}

	bus_init(l2_level);
}

int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	drv_data_init(dev, params);
	hw_init();

	cpufreq_table_init();
	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	return 0;
}