blob: a5a6ff1caffc5b7271ef367db90c2b5fde300bef [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#define pr_fmt(fmt) "%s: " fmt, __func__
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/io.h>
18#include <linux/delay.h>
19#include <linux/mutex.h>
20#include <linux/err.h>
21#include <linux/errno.h>
22#include <linux/cpufreq.h>
23#include <linux/cpu.h>
24#include <linux/regulator/consumer.h>
25
26#include <asm/mach-types.h>
27#include <asm/cpu.h>
28
29#include <mach/board.h>
30#include <mach/msm_iomap.h>
31#include <mach/rpm-regulator.h>
32#include <mach/msm_bus.h>
33#include <mach/msm_bus_board.h>
34#include <mach/socinfo.h>
Stephen Boyd469ed3e2011-09-29 16:41:19 -070035#include <mach/msm-krait-l2-accessors.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036
37#include "acpuclock.h"
38
/*
 * Source IDs.
 * These must be negative to not overlap with the source IDs
 * used by the 8x60 local clock driver.
 */
#define PLL_8			 0
#define HFPLL			-1
#define QSB			-2

/* Mux source selects. */
#define PRI_SRC_SEL_SEC_SRC	0
#define PRI_SRC_SEL_HFPLL	1
#define PRI_SRC_SEL_HFPLL_DIV2	2
#define SEC_SRC_SEL_QSB		0

/* HFPLL registers offsets. */
#define HFPLL_MODE		0x00
#define HFPLL_CONFIG_CTL	0x04
#define HFPLL_L_VAL		0x08
#define HFPLL_M_VAL		0x0C
#define HFPLL_N_VAL		0x10
#define HFPLL_DROOP_CTL		0x14

/* CP15 L2 indirect addresses. */
#define L2CPMR_IADDR		0x500
#define L2CPUCPMR_IADDR		0x501

/* Rate assigned to the standby/QSB table entries. */
#define STBY_KHZ		1

/* Voltage floors (uV) applied while the HFPLL runs; see calculate_vdd_*(). */
#define HFPLL_NOMINAL_VDD	1050000
#define HFPLL_LOW_VDD		1050000
/* Largest HFPLL L value usable at HFPLL_LOW_VDD. */
#define HFPLL_LOW_VDD_PLL_L_MAX	0x28

/* Secondary-source clock-gating disable bit in the CPMR. */
#define SECCLKAGD		BIT(4)
/* Indexes into the scalable[] array: one entry per CPU plus the L2. */
enum scalables {
	CPU0 = 0,
	CPU1,
	CPU2,
	CPU3,
	L2,
	NUM_SCALABLES
};

/* Voltage rails tracked per scalable. */
enum vregs {
	VREG_CORE,
	VREG_MEM,
	VREG_DIG,
	NUM_VREG
};

/* One voltage rail: identity, limit, and the last value requested. */
struct vreg {
	const char name[15];
	const unsigned int max_vdd;	/* Maximum voltage (uV) */
	const int rpm_vreg_voter;	/* RPM voter ID used for MEM/DIG votes */
	const int rpm_vreg_id;		/* RPM regulator ID used for MEM/DIG votes */
	struct regulator *reg;		/* regulator handle (CORE rails only) */
	unsigned int cur_vdd;		/* Last voltage (uV) requested */
};

/* A single clock configuration for a CPU or the L2. */
struct core_speed {
	unsigned int khz;		/* Rate in KHz (STBY_KHZ for standby) */
	int src;			/* Clock source ID (PLL_8, HFPLL, QSB) */
	unsigned int pri_src_sel;	/* Primary MUX select for this rate */
	unsigned int sec_src_sel;	/* Secondary MUX select for this rate */
	unsigned int pll_l_val;		/* Value programmed into HFPLL_L_VAL */
};

/* An L2 operating point and the votes it implies. */
struct l2_level {
	struct core_speed speed;
	unsigned int vdd_dig;		/* vdd_dig vote (uV) */
	unsigned int vdd_mem;		/* vdd_mem vote (uV) */
	unsigned int bw_level;		/* Index into bw_level_tbl[] */
};

/* A CPU operating point: rate, implied L2 level, and core voltage. */
struct acpu_level {
	unsigned int use_for_scaling;	/* Nonzero: expose to cpufreq */
	struct core_speed speed;
	struct l2_level *l2_level;	/* L2 level voted for at this rate */
	unsigned int vdd_core;		/* Core voltage (uV) */
};

/* Per-domain state for one Krait CPU or the shared L2. */
struct scalable {
	void * __iomem const hfpll_base;	/* HFPLL MMIO base */
	void * __iomem const aux_clk_sel;	/* AUX source select register */
	const uint32_t l2cpmr_iaddr;		/* CP15 L2 indirect CPMR address */
	struct core_speed *current_speed;	/* Speed currently programmed */
	struct l2_level *l2_vote;		/* This CPU's vote for the L2 rate */
	struct vreg vreg[NUM_VREG];		/* Core/mem/dig rails */
	bool first_set_call;			/* True until first set_rate() completes */
};
130
Vikram Mulukutlaa00149c2011-07-21 18:43:26 -0700131static struct scalable scalable_8960[] = {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700132 [CPU0] = {
133 .hfpll_base = MSM_HFPLL_BASE + 0x200,
134 .aux_clk_sel = MSM_ACC0_BASE + 0x014,
135 .l2cpmr_iaddr = L2CPUCPMR_IADDR,
136 .vreg[VREG_CORE] = { "krait0", 1150000 },
137 .vreg[VREG_MEM] = { "krait0_mem", 1150000,
138 RPM_VREG_VOTER1,
139 RPM_VREG_ID_PM8921_L24 },
140 .vreg[VREG_DIG] = { "krait0_dig", 1150000,
141 RPM_VREG_VOTER1,
142 RPM_VREG_ID_PM8921_S3 },
143 },
144 [CPU1] = {
145 .hfpll_base = MSM_HFPLL_BASE + 0x300,
146 .aux_clk_sel = MSM_ACC1_BASE + 0x014,
147 .l2cpmr_iaddr = L2CPUCPMR_IADDR,
148 .vreg[VREG_CORE] = { "krait1", 1150000 },
149 .vreg[VREG_MEM] = { "krait0_mem", 1150000,
150 RPM_VREG_VOTER2,
151 RPM_VREG_ID_PM8921_L24 },
152 .vreg[VREG_DIG] = { "krait0_dig", 1150000,
153 RPM_VREG_VOTER2,
154 RPM_VREG_ID_PM8921_S3 },
155 },
156 [L2] = {
157 .hfpll_base = MSM_HFPLL_BASE + 0x400,
158 .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
159 .l2cpmr_iaddr = L2CPMR_IADDR,
160 },
161};
162
Stephen Boyd7ad84752011-08-05 14:04:28 -0700163static DEFINE_MUTEX(driver_lock);
164static DEFINE_SPINLOCK(l2_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700165
Vikram Mulukutlaa00149c2011-07-21 18:43:26 -0700166static struct scalable scalable_8064[] = {
167 [CPU0] = {
168 .hfpll_base = MSM_HFPLL_BASE + 0x200,
169 .aux_clk_sel = MSM_ACC0_BASE + 0x014,
170 .l2cpmr_iaddr = L2CPUCPMR_IADDR,
171 .vreg[VREG_CORE] = { "krait0", 1150000 },
172 .vreg[VREG_MEM] = { "krait0_mem", 1150000,
173 RPM_VREG_VOTER1,
174 RPM_VREG_ID_PM8921_L24 },
175 .vreg[VREG_DIG] = { "krait0_dig", 1150000,
176 RPM_VREG_VOTER1,
177 RPM_VREG_ID_PM8921_S3 },
178 },
179 [CPU1] = {
180 .hfpll_base = MSM_HFPLL_BASE + 0x240,
181 .aux_clk_sel = MSM_ACC1_BASE + 0x014,
182 .l2cpmr_iaddr = L2CPUCPMR_IADDR,
183 .vreg[VREG_CORE] = { "krait1", 1150000 },
184 .vreg[VREG_MEM] = { "krait0_mem", 1150000,
185 RPM_VREG_VOTER2,
186 RPM_VREG_ID_PM8921_L24 },
187 .vreg[VREG_DIG] = { "krait0_dig", 1150000,
188 RPM_VREG_VOTER2,
189 RPM_VREG_ID_PM8921_S3 },
190 },
191 [CPU2] = {
192 .hfpll_base = MSM_HFPLL_BASE + 0x280,
193 .aux_clk_sel = MSM_ACC2_BASE + 0x014,
194 .l2cpmr_iaddr = L2CPUCPMR_IADDR,
195 .vreg[VREG_CORE] = { "krait2", 1150000 },
196 .vreg[VREG_MEM] = { "krait0_mem", 1150000,
197 RPM_VREG_VOTER4,
198 RPM_VREG_ID_PM8921_L24 },
199 .vreg[VREG_DIG] = { "krait0_dig", 1150000,
200 RPM_VREG_VOTER4,
201 RPM_VREG_ID_PM8921_S3 },
202 },
203 [CPU3] = {
204 .hfpll_base = MSM_HFPLL_BASE + 0x2C0,
205 .aux_clk_sel = MSM_ACC3_BASE + 0x014,
206 .l2cpmr_iaddr = L2CPUCPMR_IADDR,
207 .vreg[VREG_CORE] = { "krait3", 1150000 },
208 .vreg[VREG_MEM] = { "krait0_mem", 1150000,
209 RPM_VREG_VOTER5,
210 RPM_VREG_ID_PM8921_L24 },
211 .vreg[VREG_DIG] = { "krait0_dig", 1150000,
212 RPM_VREG_VOTER5,
213 RPM_VREG_ID_PM8921_S3 },
214 },
215 [L2] = {
216 .hfpll_base = MSM_HFPLL_BASE + 0x300,
217 .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
218 .l2cpmr_iaddr = L2CPMR_IADDR,
219 },
220};
221
222static struct scalable *scalable;
223static struct l2_level *l2_freq_tbl;
224static struct acpu_level *acpu_freq_tbl;
225static int l2_freq_tbl_size;
226static int cpu_boot_idx;
227static int l2_boot_idx;
228
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229/* Instantaneous bandwidth requests in MB/s. */
230#define BW_MBPS(_bw) \
231 { \
232 .vectors = (struct msm_bus_vectors[]){ \
233 {\
234 .src = MSM_BUS_MASTER_AMPSS_M0, \
235 .dst = MSM_BUS_SLAVE_EBI_CH0, \
236 .ib = (_bw) * 1000000UL, \
237 .ab = (_bw) * 100000UL, \
238 }, \
239 { \
240 .src = MSM_BUS_MASTER_AMPSS_M1, \
241 .dst = MSM_BUS_SLAVE_EBI_CH0, \
242 .ib = (_bw) * 1000000UL, \
243 .ab = (_bw) * 100000UL, \
244 }, \
245 }, \
246 .num_paths = 2, \
247 }
248static struct msm_bus_paths bw_level_tbl[] = {
249 [0] = BW_MBPS(616), /* At least 77 MHz on bus. */
250 [1] = BW_MBPS(1024), /* At least 128 MHz on bus. */
251 [2] = BW_MBPS(1536), /* At least 192 MHz on bus. */
252 [3] = BW_MBPS(2048), /* At least 256 MHz on bus. */
253 [4] = BW_MBPS(3080), /* At least 385 MHz on bus. */
254 [5] = BW_MBPS(3968), /* At least 496 MHz on bus. */
255};
256
257static struct msm_bus_scale_pdata bus_client_pdata = {
258 .usecase = bw_level_tbl,
259 .num_usecases = ARRAY_SIZE(bw_level_tbl),
260 .active_only = 1,
261 .name = "acpuclock",
262};
263
264static uint32_t bus_perf_client;
265
/* TODO: Update vdd_dig and vdd_mem when voltage data is available. */
#define L2(x) (&l2_freq_tbl_8960[(x)])
/*
 * 8960 L2 levels:
 * { {khz, src, pri_src_sel, sec_src_sel, pll_l_val}, vdd_dig, vdd_mem, bw }
 */
static struct l2_level l2_freq_tbl_8960[] = {
	[0]  = { {STBY_KHZ, QSB,   0, 0, 0x00 }, 1050000, 1050000, 0 },
	[1]  = { {  384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
	[2]  = { {  432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 1 },
	[3]  = { {  486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 1 },
	[4]  = { {  540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 1 },
	[5]  = { {  594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
	[6]  = { {  648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 2 },
	[7]  = { {  702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 2 },
	[8]  = { {  756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 2 },
	[9]  = { {  810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 3 },
	[10] = { {  864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 3 },
	[11] = { {  918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 3 },
	[12] = { {  972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 3 },
	[13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 4 },
	[14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 4 },
	[15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 4 },
	[16] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 4 },
	[17] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 5 },
	[18] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 5 },
	[19] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 5 },
	[20] = { { 1404000, HFPLL, 1, 0, 0x34 }, 1150000, 1150000, 5 },
	[21] = { { 1458000, HFPLL, 1, 0, 0x36 }, 1150000, 1150000, 5 },
	[22] = { { 1512000, HFPLL, 1, 0, 0x38 }, 1150000, 1150000, 5 },
	[23] = { { 1566000, HFPLL, 1, 0, 0x3A }, 1150000, 1150000, 5 },
	[24] = { { 1620000, HFPLL, 1, 0, 0x3C }, 1150000, 1150000, 5 },
	[25] = { { 1674000, HFPLL, 1, 0, 0x3E }, 1150000, 1150000, 5 },
};

/* TODO: Update core voltages when data is available. */
/*
 * 8960 CPU levels:
 * { use_for_scaling, {khz, src, pri, sec, pll_l}, l2_level, vdd_core }
 * Terminated by a zero-khz entry.
 */
static struct acpu_level acpu_freq_tbl_8960[] = {
	{ 0, {STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),  1050000 },
	{ 1, {  384000, PLL_8, 0, 2, 0x00 }, L2(1),  1050000 },
	{ 1, {  432000, HFPLL, 2, 0, 0x20 }, L2(6),  1050000 },
	{ 1, {  486000, HFPLL, 2, 0, 0x24 }, L2(6),  1050000 },
	{ 1, {  540000, HFPLL, 2, 0, 0x28 }, L2(6),  1050000 },
	{ 1, {  594000, HFPLL, 1, 0, 0x16 }, L2(6),  1050000 },
	{ 1, {  648000, HFPLL, 1, 0, 0x18 }, L2(6),  1050000 },
	{ 1, {  702000, HFPLL, 1, 0, 0x1A }, L2(6),  1050000 },
	{ 1, {  756000, HFPLL, 1, 0, 0x1C }, L2(13), 1150000 },
	{ 1, {  810000, HFPLL, 1, 0, 0x1E }, L2(13), 1150000 },
	{ 1, {  864000, HFPLL, 1, 0, 0x20 }, L2(13), 1150000 },
	{ 1, {  918000, HFPLL, 1, 0, 0x22 }, L2(13), 1150000 },
	{ 0, { 0 } }
};
313
Vikram Mulukutlaa00149c2011-07-21 18:43:26 -0700314/* TODO: Update vdd_dig and vdd_mem when voltage data is available. */
315#undef L2
316#define L2(x) (&l2_freq_tbl_8064[(x)])
317static struct l2_level l2_freq_tbl_8064[] = {
318 [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, 1050000, 1050000, 0 },
319 [1] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 0 },
320 [2] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 1 },
321 [3] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 1 },
322 [4] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 1 },
323 [5] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
324 [6] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 2 },
325 [7] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 2 },
326 [8] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 3 },
327 [9] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 3 },
328 [10] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 3 },
329 [11] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 3 },
330 [12] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 3 },
331 [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 3 },
332 [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 4 },
333 [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 4 },
334 [16] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 4 },
335 [17] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 4 },
336 [18] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 4 },
337 [19] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 4 },
338 [20] = { { 1404000, HFPLL, 1, 0, 0x34 }, 1150000, 1150000, 4 },
339 [21] = { { 1458000, HFPLL, 1, 0, 0x36 }, 1150000, 1150000, 5 },
340 [22] = { { 1512000, HFPLL, 1, 0, 0x38 }, 1150000, 1150000, 5 },
341 [23] = { { 1566000, HFPLL, 1, 0, 0x3A }, 1150000, 1150000, 5 },
342 [24] = { { 1620000, HFPLL, 1, 0, 0x3C }, 1150000, 1150000, 5 },
343 [25] = { { 1674000, HFPLL, 1, 0, 0x3E }, 1150000, 1150000, 5 },
344};
345
346/* TODO: Update core voltages when data is available. */
347static struct acpu_level acpu_freq_tbl_8064[] = {
348 { 0, {STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 1050000 },
349 { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 1050000 },
350 { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(2), 1050000 },
351 { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(3), 1050000 },
352 { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(4), 1050000 },
353 { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 1050000 },
354 { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1050000 },
355 { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 1050000 },
356 { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(8), 1150000 },
357 { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(9), 1150000 },
358 { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1150000 },
359 { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1150000 },
360 { 0, { 0 } }
361};
362
Matt Wagantall6d9ebee2011-08-26 12:15:24 -0700363static unsigned long acpuclk_8960_get_rate(int cpu)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700364{
365 return scalable[cpu].current_speed->khz;
366}
367
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700368/* Get the selected source on primary MUX. */
369static int get_pri_clk_src(struct scalable *sc)
370{
371 uint32_t regval;
372
Stephen Boyd469ed3e2011-09-29 16:41:19 -0700373 regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700374 return regval & 0x3;
375}
376
377/* Set the selected source on primary MUX. */
378static void set_pri_clk_src(struct scalable *sc, uint32_t pri_src_sel)
379{
380 uint32_t regval;
381
Stephen Boyd469ed3e2011-09-29 16:41:19 -0700382 regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700383 regval &= ~0x3;
384 regval |= (pri_src_sel & 0x3);
Stephen Boyd469ed3e2011-09-29 16:41:19 -0700385 set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700386 /* Wait for switch to complete. */
387 mb();
388 udelay(1);
389}
390
391/* Get the selected source on secondary MUX. */
392static int get_sec_clk_src(struct scalable *sc)
393{
394 uint32_t regval;
395
Stephen Boyd469ed3e2011-09-29 16:41:19 -0700396 regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700397 return (regval >> 2) & 0x3;
398}
399
/*
 * Set the selected source on secondary MUX.
 * The gating disable / program / re-enable order below is deliberate;
 * do not reorder these register writes.
 */
static void set_sec_clk_src(struct scalable *sc, uint32_t sec_src_sel)
{
	uint32_t regval;

	/* Disable secondary source clock gating during switch. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval |= SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Program the MUX. */
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Wait for switch to complete. */
	mb();
	udelay(1);

	/* Re-enable secondary source clock gating. */
	regval &= ~SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
}
423
/*
 * Enable an already-configured HFPLL.
 * Steps through HFPLL_MODE bits: bypass off (0x2), reset released (0x6),
 * output enabled (0x7). The delays between steps are hardware-required.
 */
static void hfpll_enable(struct scalable *sc)
{
	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + HFPLL_MODE);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + HFPLL_MODE);

	/* Wait for PLL to lock. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + HFPLL_MODE);
}
447
/* Disable a HFPLL for power-savings or while its being reprogrammed. */
static void hfpll_disable(struct scalable *sc)
{
	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + HFPLL_MODE);
}
457
/* Program the HFPLL rate (via the L value). Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, struct core_speed *tgt_s)
{
	writel_relaxed(tgt_s->pll_l_val, sc->hfpll_base + HFPLL_L_VAL);
}
463
464/* Return the L2 speed that should be applied. */
465static struct l2_level *compute_l2_level(struct scalable *sc,
466 struct l2_level *vote_l)
467{
468 struct l2_level *new_l;
469 int cpu;
470
471 /* Bounds check. */
Vikram Mulukutlaa00149c2011-07-21 18:43:26 -0700472 BUG_ON(vote_l >= (l2_freq_tbl + l2_freq_tbl_size));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700473
474 /* Find max L2 speed vote. */
475 sc->l2_vote = vote_l;
476 new_l = l2_freq_tbl;
477 for_each_present_cpu(cpu)
478 new_l = max(new_l, scalable[cpu].l2_vote);
479
480 return new_l;
481}
482
483/* Update the bus bandwidth request. */
484static void set_bus_bw(unsigned int bw)
485{
486 int ret;
487
488 /* Bounds check. */
489 if (bw >= ARRAY_SIZE(bw_level_tbl)) {
490 pr_err("invalid bandwidth request (%d)\n", bw);
491 return;
492 }
493
494 /* Update bandwidth if request has changed. This may sleep. */
495 ret = msm_bus_scale_client_update_request(bus_perf_client, bw);
496 if (ret)
497 pr_err("bandwidth request failed (%d)\n", ret);
498}
499
/*
 * Set the CPU or L2 clock speed.
 * Four cases based on whether the current and target sources are the
 * HFPLL: the HFPLL cannot be reprogrammed while it feeds the core, so
 * HFPLL->HFPLL transitions detour through QSB. On SETRATE_HOTPLUG the
 * affected CPU's CP15 mux registers are unreachable (we run elsewhere),
 * so mux changes are skipped for CPUs (but not for the L2).
 */
static void set_speed(struct scalable *sc, struct core_speed *tgt_s,
		      enum setrate_reason reason)
{
	struct core_speed *strt_s = sc->current_speed;

	if (tgt_s == strt_s)
		return;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/* Move CPU to QSB source. */
		/*
		 * TODO: If using QSB here requires elevating voltages,
		 * consider using PLL8 instead.
		 */
		set_sec_clk_src(sc, SEC_SRC_SEL_QSB);
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Program CPU HFPLL. */
		hfpll_disable(sc);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc);

		/* Move CPU to HFPLL source. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		/* TODO: Enable source. */
		/*
		 * If responding to CPU_DEAD we must be running on another
		 * CPU. Therefore, we can't access the downed CPU's CP15
		 * clock MUX registers from here and can't change clock sources.
		 * Just turn off the PLL- since the CPU is down already, halting
		 * its clock should be safe.
		 */
		if (reason != SETRATE_HOTPLUG || sc == &scalable[L2]) {
			set_sec_clk_src(sc, tgt_s->sec_src_sel);
			set_pri_clk_src(sc, tgt_s->pri_src_sel);
		}
		hfpll_disable(sc);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc);
		/*
		 * If responding to CPU_UP_PREPARE, we can't change CP15
		 * registers for the CPU that's coming up since we're not
		 * running on that CPU. That's okay though, since the MUX
		 * source was not changed on the way down, either.
		 */
		if (reason != SETRATE_HOTPLUG || sc == &scalable[L2])
			set_pri_clk_src(sc, tgt_s->pri_src_sel);
		/* TODO: Disable source. */
	} else {
		/* TODO: Enable source. */
		if (reason != SETRATE_HOTPLUG || sc == &scalable[L2])
			set_sec_clk_src(sc, tgt_s->sec_src_sel);
		/* TODO: Disable source. */
	}

	sc->current_speed = tgt_s;
}
560
561/* Apply any per-cpu voltage increases. */
562static int increase_vdd(int cpu, unsigned int vdd_core, unsigned int vdd_mem,
563 unsigned int vdd_dig, enum setrate_reason reason)
564{
565 struct scalable *sc = &scalable[cpu];
566 int rc;
567
568 /*
Matt Wagantallabd55f02011-09-12 11:45:54 -0700569 * Increase vdd_mem active-set before vdd_dig.
570 * vdd_mem should be >= vdd_dig.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 */
572 if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
573 rc = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
574 sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
575 sc->vreg[VREG_MEM].max_vdd, 0);
576 if (rc) {
577 pr_err("%s: vdd_mem (cpu%d) increase failed (%d)\n",
578 __func__, cpu, rc);
579 return rc;
580 }
581 sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
582 }
583
584 /* Increase vdd_dig active-set vote. */
585 if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
586 rc = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
587 sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
588 sc->vreg[VREG_DIG].max_vdd, 0);
589 if (rc) {
590 pr_err("%s: vdd_dig (cpu%d) increase failed (%d)\n",
591 __func__, cpu, rc);
592 return rc;
593 }
594 sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
595 }
596
597 /*
598 * Update per-CPU core voltage. Don't do this for the hotplug path for
599 * which it should already be correct. Attempting to set it is bad
600 * because we don't know what CPU we are running on at this point, but
601 * the CPU regulator API requires we call it from the affected CPU.
602 */
603 if (vdd_core > sc->vreg[VREG_CORE].cur_vdd
604 && reason != SETRATE_HOTPLUG) {
605 rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
606 sc->vreg[VREG_CORE].max_vdd);
607 if (rc) {
608 pr_err("%s: vdd_core (cpu%d) increase failed (%d)\n",
609 __func__, cpu, rc);
610 return rc;
611 }
612 sc->vreg[VREG_CORE].cur_vdd = vdd_core;
613 }
614
615 return rc;
616}
617
/*
 * Apply any per-cpu voltage decreases after lowering clock rates.
 * Mirror image of increase_vdd(): core first, then vdd_dig, then vdd_mem
 * (mem must stay >= dig). Failures are logged and abort further drops,
 * leaving rails at a safe (higher) voltage.
 */
static void decrease_vdd(int cpu, unsigned int vdd_core, unsigned int vdd_mem,
			 unsigned int vdd_dig, enum setrate_reason reason)
{
	struct scalable *sc = &scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (vdd_core < sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
					    sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			pr_err("%s: vdd_core (cpu%d) decrease failed (%d)\n",
				__func__, cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
				sc->vreg[VREG_DIG].max_vdd, 0);
		if (ret) {
			pr_err("%s: vdd_dig (cpu%d) decrease failed (%d)\n",
				__func__, cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
				sc->vreg[VREG_MEM].max_vdd, 0);
		if (ret) {
			pr_err("%s: vdd_mem (cpu%d) decrease failed (%d)\n",
				__func__, cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
	}
}
671
672static unsigned int calculate_vdd_mem(struct acpu_level *tgt)
673{
Matt Wagantallabd55f02011-09-12 11:45:54 -0700674 return tgt->l2_level->vdd_mem;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700675}
676
677static unsigned int calculate_vdd_dig(struct acpu_level *tgt)
678{
679 unsigned int pll_vdd_dig;
680
681 if (tgt->l2_level->speed.pll_l_val > HFPLL_LOW_VDD_PLL_L_MAX)
682 pll_vdd_dig = HFPLL_NOMINAL_VDD;
683 else
684 pll_vdd_dig = HFPLL_LOW_VDD;
685
686 return max(tgt->l2_level->vdd_dig, pll_vdd_dig);
687}
688
689static unsigned int calculate_vdd_core(struct acpu_level *tgt)
690{
691 unsigned int pll_vdd_core;
692
693 if (tgt->speed.pll_l_val > HFPLL_LOW_VDD_PLL_L_MAX)
694 pll_vdd_core = HFPLL_NOMINAL_VDD;
695 else
696 pll_vdd_core = HFPLL_LOW_VDD;
697
698 return max(tgt->vdd_core, pll_vdd_core);
699}
700
701/* Set the CPU's clock rate and adjust the L2 rate, if appropriate. */
Matt Wagantall6d9ebee2011-08-26 12:15:24 -0700702static int acpuclk_8960_set_rate(int cpu, unsigned long rate,
703 enum setrate_reason reason)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700704{
705 struct core_speed *strt_acpu_s, *tgt_acpu_s;
706 struct l2_level *tgt_l2_l;
707 struct acpu_level *tgt;
708 unsigned int vdd_mem, vdd_dig, vdd_core;
709 unsigned long flags;
710 int rc = 0;
711
712 if (cpu > num_possible_cpus()) {
713 rc = -EINVAL;
714 goto out;
715 }
716
717 if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
718 mutex_lock(&driver_lock);
719
720 strt_acpu_s = scalable[cpu].current_speed;
721
722 /* Return early if rate didn't change. */
723 if (rate == strt_acpu_s->khz && scalable[cpu].first_set_call == false)
724 goto out;
725
726 /* Find target frequency. */
727 for (tgt = acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
728 if (tgt->speed.khz == rate) {
729 tgt_acpu_s = &tgt->speed;
730 break;
731 }
732 }
733 if (tgt->speed.khz == 0) {
734 rc = -EINVAL;
735 goto out;
736 }
737
738 /* Calculate voltage requirements for the current CPU. */
739 vdd_mem = calculate_vdd_mem(tgt);
740 vdd_dig = calculate_vdd_dig(tgt);
741 vdd_core = calculate_vdd_core(tgt);
742
743 /* Increase VDD levels if needed. */
744 if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
745 rc = increase_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
746 if (rc)
747 goto out;
748 }
749
750 pr_debug("Switching from ACPU%d rate %u KHz -> %u KHz\n",
751 cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
752
753 /* Set the CPU speed. */
754 set_speed(&scalable[cpu], tgt_acpu_s, reason);
755
756 /*
757 * Update the L2 vote and apply the rate change. A spinlock is
758 * necessary to ensure L2 rate is calulated and set atomically,
Matt Wagantall6d9ebee2011-08-26 12:15:24 -0700759 * even if acpuclk_8960_set_rate() is called from an atomic context
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700760 * and the driver_lock mutex is not acquired.
761 */
762 spin_lock_irqsave(&l2_lock, flags);
763 tgt_l2_l = compute_l2_level(&scalable[cpu], tgt->l2_level);
764 set_speed(&scalable[L2], &tgt_l2_l->speed, reason);
765 spin_unlock_irqrestore(&l2_lock, flags);
766
767 /* Nothing else to do for power collapse or SWFI. */
768 if (reason == SETRATE_PC || reason == SETRATE_SWFI)
769 goto out;
770
771 /* Update bus bandwith request. */
772 set_bus_bw(tgt_l2_l->bw_level);
773
774 /* Drop VDD levels if we can. */
775 decrease_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
776
777 scalable[cpu].first_set_call = false;
778 pr_debug("ACPU%d speed change complete\n", cpu);
779
780out:
781 if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
782 mutex_unlock(&driver_lock);
783 return rc;
784}
785
/*
 * Initialize a HFPLL at a given rate and enable it.
 * Fully reprograms the PLL (integer mode, droop controller) and leaves it
 * locked and running at @tgt_s.
 */
static void __init hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
{
	pr_debug("Initializing HFPLL%d\n", sc - scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(0x7845C665, sc->hfpll_base + HFPLL_CONFIG_CTL);
	writel_relaxed(0, sc->hfpll_base + HFPLL_M_VAL);
	writel_relaxed(1, sc->hfpll_base + HFPLL_N_VAL);

	/* Program droop controller. */
	writel_relaxed(0x0108C000, sc->hfpll_base + HFPLL_DROOP_CTL);

	/* Set an initial rate and enable the PLL. */
	hfpll_set_rate(sc, tgt_s);
	hfpll_enable(sc);
}
806
/*
 * Voltage regulator initialization.
 * Acquires each CPU's core-rail regulator, sets it to max_vdd (the first
 * set_rate() call drops it; see first_set_call), and enables it. A missing
 * regulator is fatal (BUG); set-voltage/enable failures are only logged.
 */
static void __init regulator_init(void)
{
	int cpu, ret;
	struct scalable *sc;

	for_each_possible_cpu(cpu) {
		sc = &scalable[cpu];
		sc->vreg[VREG_CORE].reg = regulator_get(NULL,
					  sc->vreg[VREG_CORE].name);
		if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
			pr_err("regulator_get(%s) failed (%ld)\n",
			       sc->vreg[VREG_CORE].name,
			       PTR_ERR(sc->vreg[VREG_CORE].reg));
			BUG();
		}

		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
					    sc->vreg[VREG_CORE].max_vdd,
					    sc->vreg[VREG_CORE].max_vdd);
		if (ret)
			pr_err("regulator_set_voltage(%s) failed"
			       " (%d)\n", sc->vreg[VREG_CORE].name, ret);

		ret = regulator_enable(sc->vreg[VREG_CORE].reg);
		if (ret)
			pr_err("regulator_enable(%s) failed (%d)\n",
			       sc->vreg[VREG_CORE].name, ret);
	}
}
837
838#define INIT_QSB_ID 0
839#define INIT_HFPLL_ID 1
840/* Set initial rate for a given core. */
841static void __init init_clock_sources(struct scalable *sc,
842 struct core_speed *tgt_s)
843{
844 uint32_t pri_src, regval;
845
846 /*
847 * If the HFPLL is in use, program AUX source for QSB, switch to it,
848 * re-initialize the HFPLL, and switch back to the HFPLL. Otherwise,
849 * the HFPLL is not in use, so we can switch directly to it.
850 */
851 pri_src = get_pri_clk_src(scalable);
852 if (pri_src == PRI_SRC_SEL_HFPLL || pri_src == PRI_SRC_SEL_HFPLL_DIV2) {
853 set_sec_clk_src(sc, SEC_SRC_SEL_QSB);
854 set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
855 }
856 hfpll_init(sc, tgt_s);
857
858 /* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
Stephen Boyd469ed3e2011-09-29 16:41:19 -0700859 regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700860 regval &= ~(0x3 << 6);
Stephen Boyd469ed3e2011-09-29 16:41:19 -0700861 set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700862
863 /* Select PLL8 as AUX source input to the secondary MUX. */
864 writel_relaxed(0x3, sc->aux_clk_sel);
865
866 set_pri_clk_src(sc, tgt_s->pri_src_sel);
867 sc->current_speed = tgt_s;
868
869 /*
Matt Wagantall6d9ebee2011-08-26 12:15:24 -0700870 * Set this flag so that the first call to acpuclk_8960_set_rate() can
871 * drop voltages and set initial bus bandwidth requests.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700872 */
873 sc->first_set_call = true;
874}
875
Matt Wagantall8e726c72011-08-06 00:49:28 -0700876static void __init per_cpu_init(void *data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700877{
Matt Wagantall8e726c72011-08-06 00:49:28 -0700878 int cpu = smp_processor_id();
Vikram Mulukutlaa00149c2011-07-21 18:43:26 -0700879 init_clock_sources(&scalable[cpu], &acpu_freq_tbl[cpu_boot_idx].speed);
880 scalable[cpu].l2_vote = &l2_freq_tbl[l2_boot_idx];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700881}
882
/*
 * Register with bus driver and vote for the maximum bandwidth level
 * up front; the first set_rate() call lowers it to the real requirement.
 * Registration failure is fatal (BUG).
 */
static void __init bus_init(void)
{
	int ret;

	bus_perf_client = msm_bus_scale_register_client(&bus_client_pdata);
	if (!bus_perf_client) {
		pr_err("unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(bus_perf_client,
						  (ARRAY_SIZE(bw_level_tbl)-1));
	if (ret)
		pr_err("initial bandwidth request failed (%d)\n", ret);
}
899
900#ifdef CONFIG_CPU_FREQ_MSM
901static struct cpufreq_frequency_table freq_table[NR_CPUS][30];
902
903static void __init cpufreq_table_init(void)
904{
905 int cpu;
906
907 for_each_possible_cpu(cpu) {
908 int i, freq_cnt = 0;
909 /* Construct the freq_table tables from acpu_freq_tbl. */
910 for (i = 0; acpu_freq_tbl[i].speed.khz != 0
911 && freq_cnt < ARRAY_SIZE(*freq_table); i++) {
912 if (acpu_freq_tbl[i].use_for_scaling) {
913 freq_table[cpu][freq_cnt].index = freq_cnt;
914 freq_table[cpu][freq_cnt].frequency
915 = acpu_freq_tbl[i].speed.khz;
916 freq_cnt++;
917 }
918 }
919 /* freq_table not big enough to store all usable freqs. */
920 BUG_ON(acpu_freq_tbl[i].speed.khz != 0);
921
922 freq_table[cpu][freq_cnt].index = freq_cnt;
923 freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;
924
925 pr_info("CPU%d: %d scaling frequencies supported.\n",
926 cpu, freq_cnt);
927
928 /* Register table with CPUFreq. */
929 cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
930 }
931}
932#else
933static void __init cpufreq_table_init(void) {}
934#endif
935
/* Rate to park a hot-unplugged CPU at (standby frequency). */
#define HOT_UNPLUG_KHZ STBY_KHZ
/*
 * CPU hotplug notifier. Saves and restores per-CPU clock state across
 * hotplug transitions:
 *  - DYING/STARTING: on Krait v1, switch the muxes to QSB around L2 power
 *    collapse and restore the saved sources when the CPU comes back.
 *  - DEAD/UP_CANCELED: drop the CPU's clock to HOT_UNPLUG_KHZ, remembering
 *    the previous rate so UP_PREPARE can restore it.
 * State arrays are static and indexed by CPU; the notifier relies on the
 * hotplug core serializing events for a given CPU.
 */
static int __cpuinit acpuclock_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	static int prev_pri_src[NR_CPUS];
	static int prev_sec_src[NR_CPUS];
	int cpu = (int)hcpu;

	switch (action) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * On Krait v1, the primary and secondary muxes must be set
		 * to QSB before L2 power collapse and restored after.
		 */
		if (cpu_is_krait_v1()) {
			prev_sec_src[cpu] = get_sec_clk_src(&scalable[cpu]);
			prev_pri_src[cpu] = get_pri_clk_src(&scalable[cpu]);
			set_sec_clk_src(&scalable[cpu], SEC_SRC_SEL_QSB);
			set_pri_clk_src(&scalable[cpu], PRI_SRC_SEL_SEC_SRC);
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Remember the rate to restore when the CPU returns. */
		prev_khz[cpu] = acpuclk_8960_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
		break;
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/*
		 * No saved rate means the CPU was never seen going DEAD
		 * (unexpected); fall back to the first table entry.
		 */
		if (WARN_ON(!prev_khz[cpu]))
			prev_khz[cpu] = acpu_freq_tbl->speed.khz;
		acpuclk_8960_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		/* Undo the DYING-time switch to QSB (Krait v1 only). */
		if (cpu_is_krait_v1()) {
			set_sec_clk_src(&scalable[cpu], prev_sec_src[cpu]);
			set_pri_clk_src(&scalable[cpu], prev_pri_src[cpu]);
		}
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
986
/* Hotplug notifier block; registered at init to track CPU on/offline. */
static struct notifier_block __cpuinitdata acpuclock_cpu_notifier = {
	.notifier_call = acpuclock_cpu_callback,
};
990
/* Driver ops/limits handed to the common acpuclock layer at registration. */
static struct acpuclk_data acpuclk_8960_data = {
	.set_rate = acpuclk_8960_set_rate,
	.get_rate = acpuclk_8960_get_rate,
	/* Park CPUs at standby frequency for power collapse and WFI. */
	.power_collapse_khz = STBY_KHZ,
	.wait_for_irq_khz = STBY_KHZ,
};
997
Matt Wagantallec57f062011-08-16 23:54:46 -0700998static int __init acpuclk_8960_init(struct acpuclk_soc_data *soc_data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700999{
Vikram Mulukutlaa00149c2011-07-21 18:43:26 -07001000 if (cpu_is_msm8960()) {
1001 scalable = scalable_8960;
1002 acpu_freq_tbl = acpu_freq_tbl_8960;
1003 l2_freq_tbl = l2_freq_tbl_8960;
1004 l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8960);
1005 l2_boot_idx = 11;
1006 cpu_boot_idx = 11;
1007 } else if (cpu_is_apq8064()) {
1008 scalable = scalable_8064;
1009 acpu_freq_tbl = acpu_freq_tbl_8064;
1010 l2_freq_tbl = l2_freq_tbl_8064;
1011 l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8064);
1012 l2_boot_idx = 11;
1013 cpu_boot_idx = 11;
1014 }
1015
1016 init_clock_sources(&scalable[L2], &l2_freq_tbl[l2_boot_idx].speed);
Rohit Vaswanice74ba32011-08-15 15:28:36 -07001017 on_each_cpu(per_cpu_init, NULL, true);
Matt Wagantall8e726c72011-08-06 00:49:28 -07001018
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001019 regulator_init();
1020 bus_init();
1021 cpufreq_table_init();
Matt Wagantall6d9ebee2011-08-26 12:15:24 -07001022
1023 acpuclk_register(&acpuclk_8960_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001024 register_hotcpu_notifier(&acpuclock_cpu_notifier);
Matt Wagantall6d9ebee2011-08-26 12:15:24 -07001025
1026 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001027}
Matt Wagantallec57f062011-08-16 23:54:46 -07001028
/* SoC descriptor consumed by the board/acpuclock glue to invoke init. */
struct acpuclk_soc_data acpuclk_8960_soc_data __initdata = {
	.init = acpuclk_8960_init,
};