/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/rpm-regulator.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>

#include "acpuclock.h"

/*
 * Source IDs.
 * These must be negative to not overlap with the source IDs
 * used by the 8x60 local clock driver.
 */
#define PLL_8 0
#define HFPLL -1
#define QSB -2

/* Mux source selects. */
#define PRI_SRC_SEL_SEC_SRC 0
#define PRI_SRC_SEL_HFPLL 1
#define PRI_SRC_SEL_HFPLL_DIV2 2
#define SEC_SRC_SEL_QSB 0

/* HFPLL register offsets. */
#define HFPLL_MODE 0x00
#define HFPLL_CONFIG_CTL 0x04
#define HFPLL_L_VAL 0x08
#define HFPLL_M_VAL 0x0C
#define HFPLL_N_VAL 0x10
#define HFPLL_DROOP_CTL 0x14

/* CP15 L2 indirect addresses. */
#define L2CPMR_IADDR 0x500
#define L2CPUCPMR_IADDR 0x501

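/* Token rate used to represent the standby state, where cores run from QSB. */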
#define STBY_KHZ 1

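/*
 * Voltage required by the HFPLL itself: L-values above
 * HFPLL_LOW_VDD_PLL_L_MAX need HFPLL_NOMINAL_VDD, lower ones HFPLL_LOW_VDD.
 */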
#define HFPLL_NOMINAL_VDD 1050000
#define HFPLL_LOW_VDD 945000
#define HFPLL_LOW_VDD_PLL_L_MAX 0x28

#define SECCLKAGD BIT(4)

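/*
 * Independently scalable clock domains: one per Krait CPU, plus the
 * shared L2 cache.
 */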
enum scalables {
        CPU0 = 0,
        CPU1,
        CPU2,
        CPU3,
        L2,
        NUM_SCALABLES
};

enum vregs {
        VREG_CORE,
        VREG_MEM,
        VREG_DIG,
        NUM_VREG
};

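/* State of one voltage rail (CPU core, memory, or digital logic). */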
struct vreg {
        const char name[15];
        const unsigned int max_vdd;
        const int rpm_vreg_voter;
        const int rpm_vreg_id;
        struct regulator *reg;
        unsigned int cur_vdd;
};

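/* Mux/PLL settings that produce a single CPU or L2 rate. */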
struct core_speed {
        unsigned int khz;
        int src;
        unsigned int pri_src_sel;
        unsigned int sec_src_sel;
        unsigned int pll_l_val;
};

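/* An L2 rate plus the voltages and bus bandwidth level it requires. */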
struct l2_level {
        struct core_speed speed;
        unsigned int vdd_dig;
        unsigned int vdd_mem;
        unsigned int bw_level;
};

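/* A CPU rate, the minimum L2 level it needs, and its core voltage. */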
struct acpu_level {
        unsigned int use_for_scaling;
        struct core_speed speed;
        struct l2_level *l2_level;
        unsigned int vdd_core;
};

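/* Register mappings, regulators, and current state of one clock domain. */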
struct scalable {
        void __iomem * const hfpll_base;
        void __iomem * const aux_clk_sel;
        const uint32_t l2cpmr_iaddr;
        struct core_speed *current_speed;
        struct l2_level *l2_vote;
        struct vreg vreg[NUM_VREG];
        bool first_set_call;
};

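/* MSM8960: two Krait CPUs plus the shared L2. */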
static struct scalable scalable_8960[] = {
        [CPU0] = {
                .hfpll_base = MSM_HFPLL_BASE + 0x200,
                .aux_clk_sel = MSM_ACC0_BASE + 0x014,
                .l2cpmr_iaddr = L2CPUCPMR_IADDR,
                .vreg[VREG_CORE] = { "krait0", 1150000 },
                .vreg[VREG_MEM] = { "krait0_mem", 1150000,
                                    RPM_VREG_VOTER1,
                                    RPM_VREG_ID_PM8921_L24 },
                .vreg[VREG_DIG] = { "krait0_dig", 1150000,
                                    RPM_VREG_VOTER1,
                                    RPM_VREG_ID_PM8921_S3 },
        },
        [CPU1] = {
                .hfpll_base = MSM_HFPLL_BASE + 0x300,
                .aux_clk_sel = MSM_ACC1_BASE + 0x014,
                .l2cpmr_iaddr = L2CPUCPMR_IADDR,
                .vreg[VREG_CORE] = { "krait1", 1150000 },
                .vreg[VREG_MEM] = { "krait0_mem", 1150000,
                                    RPM_VREG_VOTER2,
                                    RPM_VREG_ID_PM8921_L24 },
                .vreg[VREG_DIG] = { "krait0_dig", 1150000,
                                    RPM_VREG_VOTER2,
                                    RPM_VREG_ID_PM8921_S3 },
        },
        [L2] = {
                .hfpll_base = MSM_HFPLL_BASE + 0x400,
                .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
                .l2cpmr_iaddr = L2CPMR_IADDR,
        },
};

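/* Serializes rate changes made from process context (cpufreq, hotplug). */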
static DEFINE_MUTEX(driver_lock);
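/* Protects the L2 speed votes; may be taken from atomic context. */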
static DEFINE_SPINLOCK(l2_lock);

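/* APQ8064: four Krait CPUs plus the shared L2. */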
static struct scalable scalable_8064[] = {
        [CPU0] = {
                .hfpll_base = MSM_HFPLL_BASE + 0x200,
                .aux_clk_sel = MSM_ACC0_BASE + 0x014,
                .l2cpmr_iaddr = L2CPUCPMR_IADDR,
                .vreg[VREG_CORE] = { "krait0", 1150000 },
                .vreg[VREG_MEM] = { "krait0_mem", 1150000,
                                    RPM_VREG_VOTER1,
                                    RPM_VREG_ID_PM8921_L24 },
                .vreg[VREG_DIG] = { "krait0_dig", 1150000,
                                    RPM_VREG_VOTER1,
                                    RPM_VREG_ID_PM8921_S3 },
        },
        [CPU1] = {
                .hfpll_base = MSM_HFPLL_BASE + 0x240,
                .aux_clk_sel = MSM_ACC1_BASE + 0x014,
                .l2cpmr_iaddr = L2CPUCPMR_IADDR,
                .vreg[VREG_CORE] = { "krait1", 1150000 },
                .vreg[VREG_MEM] = { "krait0_mem", 1150000,
                                    RPM_VREG_VOTER2,
                                    RPM_VREG_ID_PM8921_L24 },
                .vreg[VREG_DIG] = { "krait0_dig", 1150000,
                                    RPM_VREG_VOTER2,
                                    RPM_VREG_ID_PM8921_S3 },
        },
        [CPU2] = {
                .hfpll_base = MSM_HFPLL_BASE + 0x280,
                .aux_clk_sel = MSM_ACC2_BASE + 0x014,
                .l2cpmr_iaddr = L2CPUCPMR_IADDR,
                .vreg[VREG_CORE] = { "krait2", 1150000 },
                .vreg[VREG_MEM] = { "krait0_mem", 1150000,
                                    RPM_VREG_VOTER4,
                                    RPM_VREG_ID_PM8921_L24 },
                .vreg[VREG_DIG] = { "krait0_dig", 1150000,
                                    RPM_VREG_VOTER4,
                                    RPM_VREG_ID_PM8921_S3 },
        },
        [CPU3] = {
                .hfpll_base = MSM_HFPLL_BASE + 0x2C0,
                .aux_clk_sel = MSM_ACC3_BASE + 0x014,
                .l2cpmr_iaddr = L2CPUCPMR_IADDR,
                .vreg[VREG_CORE] = { "krait3", 1150000 },
                .vreg[VREG_MEM] = { "krait0_mem", 1150000,
                                    RPM_VREG_VOTER5,
                                    RPM_VREG_ID_PM8921_L24 },
                .vreg[VREG_DIG] = { "krait0_dig", 1150000,
                                    RPM_VREG_VOTER5,
                                    RPM_VREG_ID_PM8921_S3 },
        },
        [L2] = {
                .hfpll_base = MSM_HFPLL_BASE + 0x300,
                .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
                .l2cpmr_iaddr = L2CPMR_IADDR,
        },
};

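/* Tables for the SoC we're running on, chosen by select_freq_plan(). */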
static struct scalable *scalable;
static struct l2_level *l2_freq_tbl;
static struct acpu_level *acpu_freq_tbl;
static int l2_freq_tbl_size;

/* Instantaneous bandwidth requests in MB/s. */
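/* The average (ab) vote is one tenth of the instantaneous (ib) vote. */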
#define BW_MBPS(_bw) \
        { \
                .vectors = (struct msm_bus_vectors[]){ \
                        { \
                                .src = MSM_BUS_MASTER_AMPSS_M0, \
                                .dst = MSM_BUS_SLAVE_EBI_CH0, \
                                .ib = (_bw) * 1000000UL, \
                                .ab = (_bw) * 100000UL, \
                        }, \
                        { \
                                .src = MSM_BUS_MASTER_AMPSS_M1, \
                                .dst = MSM_BUS_SLAVE_EBI_CH0, \
                                .ib = (_bw) * 1000000UL, \
                                .ab = (_bw) * 100000UL, \
                        }, \
                }, \
                .num_paths = 2, \
        }
static struct msm_bus_paths bw_level_tbl[] = {
        [0] = BW_MBPS(616),  /* At least  77 MHz on bus. */
        [1] = BW_MBPS(1024), /* At least 128 MHz on bus. */
        [2] = BW_MBPS(1536), /* At least 192 MHz on bus. */
        [3] = BW_MBPS(2048), /* At least 256 MHz on bus. */
        [4] = BW_MBPS(3080), /* At least 385 MHz on bus. */
        [5] = BW_MBPS(3968), /* At least 496 MHz on bus. */
};

static struct msm_bus_scale_pdata bus_client_pdata = {
        .usecase = bw_level_tbl,
        .num_usecases = ARRAY_SIZE(bw_level_tbl),
        .active_only = 1,
        .name = "acpuclock",
};

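/* Client handle returned by the bus-scaling driver at registration. */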
static uint32_t bus_perf_client;

/* TODO: Update vdd_dig and vdd_mem when voltage data is available. */
#define L2(x) (&l2_freq_tbl_8960[(x)])
static struct l2_level l2_freq_tbl_8960[] = {
        [0]  = { {STBY_KHZ, QSB,   0, 0, 0x00 }, 1050000, 1050000, 0 },
        [1]  = { {  384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
        [2]  = { {  432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 1 },
        [3]  = { {  486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 1 },
        [4]  = { {  540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 1 },
        [5]  = { {  594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
        [6]  = { {  648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 2 },
        [7]  = { {  702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 2 },
        [8]  = { {  756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 2 },
        [9]  = { {  810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 3 },
        [10] = { {  864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 3 },
        [11] = { {  918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 3 },
};

static struct acpu_level acpu_freq_tbl_8960[] = {
        { 0, {STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   950000 },
        { 1, {  384000, PLL_8, 0, 2, 0x00 }, L2(1),   950000 },
        { 1, {  432000, HFPLL, 2, 0, 0x20 }, L2(6),   950000 },
        { 1, {  486000, HFPLL, 2, 0, 0x24 }, L2(6),   962500 },
        { 1, {  540000, HFPLL, 2, 0, 0x28 }, L2(6),   962500 },
        { 1, {  594000, HFPLL, 1, 0, 0x16 }, L2(6),   987500 },
        { 1, {  648000, HFPLL, 1, 0, 0x18 }, L2(6),  1000000 },
        { 1, {  702000, HFPLL, 1, 0, 0x1A }, L2(6),  1025000 },
        { 1, {  756000, HFPLL, 1, 0, 0x1C }, L2(11), 1050000 },
        { 1, {  810000, HFPLL, 1, 0, 0x1E }, L2(11), 1087500 },
        { 1, {  864000, HFPLL, 1, 0, 0x20 }, L2(11), 1125000 },
        { 1, {  918000, HFPLL, 1, 0, 0x22 }, L2(11), 1137500 },
        { 0, { 0 } }
};

/* TODO: Update vdd_dig and vdd_mem when voltage data is available. */
#undef L2
#define L2(x) (&l2_freq_tbl_8064[(x)])
static struct l2_level l2_freq_tbl_8064[] = {
        [0]  = { {STBY_KHZ, QSB,   0, 0, 0x00 }, 1050000, 1050000, 0 },
        [1]  = { {  384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 0 },
        [2]  = { {  432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 1 },
        [3]  = { {  486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 1 },
        [4]  = { {  540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 1 },
        [5]  = { {  594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
        [6]  = { {  648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 2 },
        [7]  = { {  702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 2 },
        [8]  = { {  756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 3 },
        [9]  = { {  810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 3 },
        [10] = { {  864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 3 },
        [11] = { {  918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 3 },
        [12] = { {  972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 3 },
        [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 3 },
        [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 4 },
        [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 4 },
        [16] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 4 },
        [17] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 4 },
        [18] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 4 },
        [19] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 4 },
        [20] = { { 1404000, HFPLL, 1, 0, 0x34 }, 1150000, 1150000, 4 },
        [21] = { { 1458000, HFPLL, 1, 0, 0x36 }, 1150000, 1150000, 5 },
        [22] = { { 1512000, HFPLL, 1, 0, 0x38 }, 1150000, 1150000, 5 },
        [23] = { { 1566000, HFPLL, 1, 0, 0x3A }, 1150000, 1150000, 5 },
        [24] = { { 1620000, HFPLL, 1, 0, 0x3C }, 1150000, 1150000, 5 },
        [25] = { { 1674000, HFPLL, 1, 0, 0x3E }, 1150000, 1150000, 5 },
};

/* TODO: Update core voltages when data is available. */
static struct acpu_level acpu_freq_tbl_8064[] = {
        { 0, {STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),  1050000 },
        { 1, {  384000, PLL_8, 0, 2, 0x00 }, L2(1),  1050000 },
        { 1, {  432000, HFPLL, 2, 0, 0x20 }, L2(2),  1050000 },
        { 1, {  486000, HFPLL, 2, 0, 0x24 }, L2(3),  1050000 },
        { 1, {  540000, HFPLL, 2, 0, 0x28 }, L2(4),  1050000 },
        { 1, {  594000, HFPLL, 1, 0, 0x16 }, L2(5),  1050000 },
        { 1, {  648000, HFPLL, 1, 0, 0x18 }, L2(6),  1050000 },
        { 1, {  702000, HFPLL, 1, 0, 0x1A }, L2(7),  1050000 },
        { 1, {  756000, HFPLL, 1, 0, 0x1C }, L2(8),  1150000 },
        { 1, {  810000, HFPLL, 1, 0, 0x1E }, L2(9),  1150000 },
        { 1, {  864000, HFPLL, 1, 0, 0x20 }, L2(10), 1150000 },
        { 1, {  918000, HFPLL, 1, 0, 0x22 }, L2(11), 1150000 },
        { 0, { 0 } }
};

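/* Return a CPU's current clock rate in KHz. */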
static unsigned long acpuclk_8960_get_rate(int cpu)
{
        return scalable[cpu].current_speed->khz;
}

/* Get the selected source on primary MUX. */
static int get_pri_clk_src(struct scalable *sc)
{
        uint32_t regval;

        regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
        return regval & 0x3;
}

/* Set the selected source on primary MUX. */
static void set_pri_clk_src(struct scalable *sc, uint32_t pri_src_sel)
{
        uint32_t regval;

        regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
        regval &= ~0x3;
        regval |= (pri_src_sel & 0x3);
        set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
        /* Wait for switch to complete. */
        mb();
        udelay(1);
}

/* Get the selected source on secondary MUX. */
static int get_sec_clk_src(struct scalable *sc)
{
        uint32_t regval;

        regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
        return (regval >> 2) & 0x3;
}

/* Set the selected source on secondary MUX. */
static void set_sec_clk_src(struct scalable *sc, uint32_t sec_src_sel)
{
        uint32_t regval;

        /* Disable secondary source clock gating during switch. */
        regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
        regval |= SECCLKAGD;
        set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

        /* Program the MUX. */
        regval &= ~(0x3 << 2);
        regval |= ((sec_src_sel & 0x3) << 2);
        set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

        /* Wait for switch to complete. */
        mb();
        udelay(1);

        /* Re-enable secondary source clock gating after the switch. */
        regval &= ~SECCLKAGD;
        set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc)
{
        /* Disable PLL bypass mode. */
        writel_relaxed(0x2, sc->hfpll_base + HFPLL_MODE);

        /*
         * H/W requires a 5us delay between disabling the bypass and
         * de-asserting the reset. Delay 10us just to be safe.
         */
        mb();
        udelay(10);

        /* De-assert active-low PLL reset. */
        writel_relaxed(0x6, sc->hfpll_base + HFPLL_MODE);

        /* Wait for PLL to lock. */
        mb();
        udelay(60);

        /* Enable PLL output. */
        writel_relaxed(0x7, sc->hfpll_base + HFPLL_MODE);
}

/* Disable an HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc)
{
        /*
         * Disable the PLL output, disable test mode, enable
         * the bypass mode, and assert the reset.
         */
        writel_relaxed(0, sc->hfpll_base + HFPLL_MODE);
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, struct core_speed *tgt_s)
{
        writel_relaxed(tgt_s->pll_l_val, sc->hfpll_base + HFPLL_L_VAL);
}

/* Return the L2 speed that should be applied. */
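/* Called with l2_lock held so the votes and the result stay consistent. */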
static struct l2_level *compute_l2_level(struct scalable *sc,
                                         struct l2_level *vote_l)
{
        struct l2_level *new_l;
        int cpu;

        /* Bounds check. */
        BUG_ON(vote_l >= (l2_freq_tbl + l2_freq_tbl_size));

        /* Find max L2 speed vote. */
        sc->l2_vote = vote_l;
        new_l = l2_freq_tbl;
        for_each_present_cpu(cpu)
                new_l = max(new_l, scalable[cpu].l2_vote);

        return new_l;
}

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
        int ret;

        /* Bounds check. */
        if (bw >= ARRAY_SIZE(bw_level_tbl)) {
                pr_err("invalid bandwidth request (%u)\n", bw);
                return;
        }

        /* Update bandwidth if request has changed. This may sleep. */
        ret = msm_bus_scale_client_update_request(bus_perf_client, bw);
        if (ret)
                pr_err("bandwidth request failed (%d)\n", ret);
}

/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, struct core_speed *tgt_s,
                      enum setrate_reason reason)
{
        struct core_speed *strt_s = sc->current_speed;

        if (tgt_s == strt_s)
                return;

        if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
                /* Move CPU to QSB source. */
                /*
                 * TODO: If using QSB here requires elevating voltages,
                 * consider using PLL8 instead.
                 */
                set_sec_clk_src(sc, SEC_SRC_SEL_QSB);
                set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

                /* Program CPU HFPLL. */
                hfpll_disable(sc);
                hfpll_set_rate(sc, tgt_s);
                hfpll_enable(sc);

                /* Move CPU to HFPLL source. */
                set_pri_clk_src(sc, tgt_s->pri_src_sel);
        } else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
                /* TODO: Enable source. */
                /*
                 * If responding to CPU_DEAD we must be running on another
                 * CPU. Therefore, we can't access the downed CPU's CP15
                 * clock MUX registers from here and can't change clock
                 * sources. Just turn off the PLL; since the CPU is down
                 * already, halting its clock should be safe.
                 */
                if (reason != SETRATE_HOTPLUG || sc == &scalable[L2]) {
                        set_sec_clk_src(sc, tgt_s->sec_src_sel);
                        set_pri_clk_src(sc, tgt_s->pri_src_sel);
                }
                hfpll_disable(sc);
        } else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
                hfpll_set_rate(sc, tgt_s);
                hfpll_enable(sc);
                /*
                 * If responding to CPU_UP_PREPARE, we can't change CP15
                 * registers for the CPU that's coming up since we're not
                 * running on that CPU. That's okay though, since the MUX
                 * source was not changed on the way down, either.
                 */
                if (reason != SETRATE_HOTPLUG || sc == &scalable[L2])
                        set_pri_clk_src(sc, tgt_s->pri_src_sel);
                /* TODO: Disable source. */
        } else {
                /* TODO: Enable source. */
                if (reason != SETRATE_HOTPLUG || sc == &scalable[L2])
                        set_sec_clk_src(sc, tgt_s->sec_src_sel);
                /* TODO: Disable source. */
        }

        sc->current_speed = tgt_s;
}

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, unsigned int vdd_core, unsigned int vdd_mem,
                        unsigned int vdd_dig, enum setrate_reason reason)
{
        struct scalable *sc = &scalable[cpu];
        int rc = 0;

        /*
         * Increase vdd_mem active-set before vdd_dig.
         * vdd_mem should be >= vdd_dig.
         */
        if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
                rc = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
                                sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
                                sc->vreg[VREG_MEM].max_vdd, 0);
                if (rc) {
                        pr_err("%s: vdd_mem (cpu%d) increase failed (%d)\n",
                                __func__, cpu, rc);
                        return rc;
                }
                sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
        }

        /* Increase vdd_dig active-set vote. */
        if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
                rc = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
                                sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
                                sc->vreg[VREG_DIG].max_vdd, 0);
                if (rc) {
                        pr_err("%s: vdd_dig (cpu%d) increase failed (%d)\n",
                                __func__, cpu, rc);
                        return rc;
                }
                sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
        }

        /*
         * Update per-CPU core voltage. Don't do this for the hotplug path for
         * which it should already be correct. Attempting to set it is bad
         * because we don't know what CPU we are running on at this point, but
         * the CPU regulator API requires we call it from the affected CPU.
         */
        if (vdd_core > sc->vreg[VREG_CORE].cur_vdd
                        && reason != SETRATE_HOTPLUG) {
                rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
                                           sc->vreg[VREG_CORE].max_vdd);
                if (rc) {
                        pr_err("%s: vdd_core (cpu%d) increase failed (%d)\n",
                                __func__, cpu, rc);
                        return rc;
                }
                sc->vreg[VREG_CORE].cur_vdd = vdd_core;
        }

        return rc;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, unsigned int vdd_core, unsigned int vdd_mem,
                         unsigned int vdd_dig, enum setrate_reason reason)
{
        struct scalable *sc = &scalable[cpu];
        int ret;

        /*
         * Update per-CPU core voltage. This must be called on the CPU
         * that's being affected. Don't do this in the hotplug remove path,
         * where the rail is off and we're executing on the other CPU.
         */
        if (vdd_core < sc->vreg[VREG_CORE].cur_vdd
                        && reason != SETRATE_HOTPLUG) {
                ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
                                            sc->vreg[VREG_CORE].max_vdd);
                if (ret) {
                        pr_err("%s: vdd_core (cpu%d) decrease failed (%d)\n",
                                __func__, cpu, ret);
                        return;
                }
                sc->vreg[VREG_CORE].cur_vdd = vdd_core;
        }

        /* Decrease vdd_dig active-set vote. */
        if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
                ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
                                sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
                                sc->vreg[VREG_DIG].max_vdd, 0);
                if (ret) {
                        pr_err("%s: vdd_dig (cpu%d) decrease failed (%d)\n",
                                __func__, cpu, ret);
                        return;
                }
                sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
        }

        /*
         * Decrease vdd_mem active-set after vdd_dig.
         * vdd_mem should be >= vdd_dig.
         */
        if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
                ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
                                sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
                                sc->vreg[VREG_MEM].max_vdd, 0);
                if (ret) {
                        pr_err("%s: vdd_mem (cpu%d) decrease failed (%d)\n",
                                __func__, cpu, ret);
                        return;
                }
                sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
        }
}

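/* The memory rail must satisfy the target's L2 level. */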
static unsigned int calculate_vdd_mem(struct acpu_level *tgt)
{
        return tgt->l2_level->vdd_mem;
}

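/* The digital rail must satisfy both the L2 level and the HFPLL. */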
static unsigned int calculate_vdd_dig(struct acpu_level *tgt)
{
        unsigned int pll_vdd_dig;

        if (tgt->l2_level->speed.pll_l_val > HFPLL_LOW_VDD_PLL_L_MAX)
                pll_vdd_dig = HFPLL_NOMINAL_VDD;
        else
                pll_vdd_dig = HFPLL_LOW_VDD;

        return max(tgt->l2_level->vdd_dig, pll_vdd_dig);
}

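/* The core rail must satisfy both the CPU level and its HFPLL. */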
static unsigned int calculate_vdd_core(struct acpu_level *tgt)
{
        unsigned int pll_vdd_core;

        if (tgt->speed.pll_l_val > HFPLL_LOW_VDD_PLL_L_MAX)
                pll_vdd_core = HFPLL_NOMINAL_VDD;
        else
                pll_vdd_core = HFPLL_LOW_VDD;

        return max(tgt->vdd_core, pll_vdd_core);
}

/* Set the CPU's clock rate and adjust the L2 rate, if appropriate. */
static int acpuclk_8960_set_rate(int cpu, unsigned long rate,
                                 enum setrate_reason reason)
{
        struct core_speed *strt_acpu_s, *tgt_acpu_s;
        struct l2_level *tgt_l2_l;
        struct acpu_level *tgt;
        unsigned int vdd_mem, vdd_dig, vdd_core;
        unsigned long flags;
        int rc = 0;

        /* Return before taking driver_lock; "out" would try to unlock it. */
        if (cpu >= num_possible_cpus())
                return -EINVAL;

        if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
                mutex_lock(&driver_lock);

        strt_acpu_s = scalable[cpu].current_speed;

        /* Return early if rate didn't change. */
        if (rate == strt_acpu_s->khz && scalable[cpu].first_set_call == false)
                goto out;

        /* Find target frequency. */
        for (tgt = acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
                if (tgt->speed.khz == rate) {
                        tgt_acpu_s = &tgt->speed;
                        break;
                }
        }
        if (tgt->speed.khz == 0) {
                rc = -EINVAL;
                goto out;
        }

        /* Calculate voltage requirements for the current CPU. */
        vdd_mem = calculate_vdd_mem(tgt);
        vdd_dig = calculate_vdd_dig(tgt);
        vdd_core = calculate_vdd_core(tgt);

        /* Increase VDD levels if needed. */
        if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
                rc = increase_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
                if (rc)
                        goto out;
        }

        pr_debug("Switching ACPU%d rate from %u KHz to %u KHz\n",
                 cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

        /* Set the CPU speed. */
        set_speed(&scalable[cpu], tgt_acpu_s, reason);

        /*
         * Update the L2 vote and apply the rate change. A spinlock is
         * necessary to ensure the L2 rate is calculated and set atomically,
         * even if acpuclk_8960_set_rate() is called from an atomic context
         * and the driver_lock mutex is not acquired.
         */
        spin_lock_irqsave(&l2_lock, flags);
        tgt_l2_l = compute_l2_level(&scalable[cpu], tgt->l2_level);
        set_speed(&scalable[L2], &tgt_l2_l->speed, reason);
        spin_unlock_irqrestore(&l2_lock, flags);

        /* Nothing else to do for power collapse or SWFI. */
        if (reason == SETRATE_PC || reason == SETRATE_SWFI)
                goto out;

        /* Update bus bandwidth request. */
        set_bus_bw(tgt_l2_l->bw_level);

        /* Drop VDD levels if we can. */
        decrease_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);

        scalable[cpu].first_set_call = false;
        pr_debug("ACPU%d speed change complete\n", cpu);

out:
        if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
                mutex_unlock(&driver_lock);
        return rc;
}

/* Initialize an HFPLL at a given rate and enable it. */
static void __init hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
{
        pr_debug("Initializing HFPLL%d\n", sc - scalable);

        /* Disable the PLL for re-programming. */
        hfpll_disable(sc);

        /* Configure PLL parameters for integer mode. */
        writel_relaxed(0x7845C665, sc->hfpll_base + HFPLL_CONFIG_CTL);
        writel_relaxed(0, sc->hfpll_base + HFPLL_M_VAL);
        writel_relaxed(1, sc->hfpll_base + HFPLL_N_VAL);

        /* Program droop controller. */
        writel_relaxed(0x0108C000, sc->hfpll_base + HFPLL_DROOP_CTL);

        /* Set an initial rate and enable the PLL. */
        hfpll_set_rate(sc, tgt_s);
        hfpll_enable(sc);
}

/* Voltage regulator initialization. */
static void __init regulator_init(void)
{
        int cpu, ret;
        struct scalable *sc;

        for_each_possible_cpu(cpu) {
                sc = &scalable[cpu];
                sc->vreg[VREG_CORE].reg = regulator_get(NULL,
                                          sc->vreg[VREG_CORE].name);
                if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
                        pr_err("regulator_get(%s) failed (%ld)\n",
                               sc->vreg[VREG_CORE].name,
                               PTR_ERR(sc->vreg[VREG_CORE].reg));
                        BUG();
                }

                ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
                                            sc->vreg[VREG_CORE].max_vdd,
                                            sc->vreg[VREG_CORE].max_vdd);
                if (ret)
                        pr_err("regulator_set_voltage(%s) failed (%d)\n",
                               sc->vreg[VREG_CORE].name, ret);

                ret = regulator_enable(sc->vreg[VREG_CORE].reg);
                if (ret)
                        pr_err("regulator_enable(%s) failed (%d)\n",
                               sc->vreg[VREG_CORE].name, ret);
        }
}

#define INIT_QSB_ID 0
#define INIT_HFPLL_ID 1
/* Set initial rate for a given core. */
static void __init init_clock_sources(struct scalable *sc,
                                      struct core_speed *tgt_s)
{
        uint32_t pri_src, regval;

        /*
         * If the HFPLL is in use, program AUX source for QSB, switch to it,
         * re-initialize the HFPLL, and switch back to the HFPLL. Otherwise,
         * the HFPLL is not in use, so we can switch directly to it.
         */
        pri_src = get_pri_clk_src(sc);
        if (pri_src == PRI_SRC_SEL_HFPLL || pri_src == PRI_SRC_SEL_HFPLL_DIV2) {
                set_sec_clk_src(sc, SEC_SRC_SEL_QSB);
                set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
        }
        hfpll_init(sc, tgt_s);

        /* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
        regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
        regval &= ~(0x3 << 6);
        set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

        /* Select PLL8 as AUX source input to the secondary MUX. */
        writel_relaxed(0x3, sc->aux_clk_sel);

        set_pri_clk_src(sc, tgt_s->pri_src_sel);
        sc->current_speed = tgt_s;

        /*
         * Set this flag so that the first call to acpuclk_8960_set_rate() can
         * drop voltages and set initial bus bandwidth requests.
         */
        sc->first_set_call = true;
}

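/* Per-CPU setup, run on each online CPU via on_each_cpu(). */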
static void __init per_cpu_init(void *data)
{
        struct acpu_level *max_acpu_level = data;
        int cpu = smp_processor_id();

        init_clock_sources(&scalable[cpu], &max_acpu_level->speed);
        scalable[cpu].l2_vote = max_acpu_level->l2_level;
}

/* Register with bus driver. */
static void __init bus_init(void)
{
        int ret;

        bus_perf_client = msm_bus_scale_register_client(&bus_client_pdata);
        if (!bus_perf_client) {
                pr_err("unable to register bus client\n");
                BUG();
        }

        ret = msm_bus_scale_client_update_request(bus_perf_client,
                                        (ARRAY_SIZE(bw_level_tbl)-1));
        if (ret)
                pr_err("initial bandwidth request failed (%d)\n", ret);
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][30];

static void __init cpufreq_table_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i, freq_cnt = 0;
                /* Construct the freq_table tables from acpu_freq_tbl. */
                for (i = 0; acpu_freq_tbl[i].speed.khz != 0
                                && freq_cnt < ARRAY_SIZE(*freq_table); i++) {
                        if (acpu_freq_tbl[i].use_for_scaling) {
                                freq_table[cpu][freq_cnt].index = freq_cnt;
                                freq_table[cpu][freq_cnt].frequency
                                        = acpu_freq_tbl[i].speed.khz;
                                freq_cnt++;
                        }
                }
                /* freq_table not big enough to store all usable freqs. */
                BUG_ON(acpu_freq_tbl[i].speed.khz != 0);

                freq_table[cpu][freq_cnt].index = freq_cnt;
                freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

                pr_info("CPU%d: %d scaling frequencies supported.\n",
                        cpu, freq_cnt);

                /* Register table with CPUFreq. */
                cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
        }
}
#else
static void __init cpufreq_table_init(void) {}
#endif

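/* Rate to request while a CPU is hot-unplugged. */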
#define HOT_UNPLUG_KHZ STBY_KHZ
static int __cpuinit acpuclock_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
{
        static int prev_khz[NR_CPUS];
        static int prev_pri_src[NR_CPUS];
        static int prev_sec_src[NR_CPUS];
        int cpu = (int)hcpu;

        switch (action) {
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /*
                 * On Krait v1, the primary and secondary muxes must be set
                 * to QSB before L2 power collapse and restored after.
                 */
                if (cpu_is_krait_v1()) {
                        prev_sec_src[cpu] = get_sec_clk_src(&scalable[cpu]);
                        prev_pri_src[cpu] = get_pri_clk_src(&scalable[cpu]);
                        set_sec_clk_src(&scalable[cpu], SEC_SRC_SEL_QSB);
                        set_pri_clk_src(&scalable[cpu], PRI_SRC_SEL_SEC_SRC);
                }
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                prev_khz[cpu] = acpuclk_8960_get_rate(cpu);
                /* Fall through. */
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
                break;
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (WARN_ON(!prev_khz[cpu]))
                        prev_khz[cpu] = acpu_freq_tbl->speed.khz;
                acpuclk_8960_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
                break;
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                if (cpu_is_krait_v1()) {
                        set_sec_clk_src(&scalable[cpu], prev_sec_src[cpu]);
                        set_pri_clk_src(&scalable[cpu], prev_pri_src[cpu]);
                }
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclock_cpu_notifier = {
        .notifier_call = acpuclock_cpu_callback,
};

static struct acpu_level * __init select_freq_plan(void)
{
        struct acpu_level *l, *max_acpu_level = NULL;

        /* Select frequency tables. */
        if (cpu_is_msm8960()) {
                scalable = scalable_8960;
                acpu_freq_tbl = acpu_freq_tbl_8960;
                l2_freq_tbl = l2_freq_tbl_8960;
                l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8960);
        } else if (cpu_is_apq8064()) {
                scalable = scalable_8064;
                acpu_freq_tbl = acpu_freq_tbl_8064;
                l2_freq_tbl = l2_freq_tbl_8064;
                l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8064);
        } else {
                BUG();
        }

        /* Find the max supported scaling frequency. */
        for (l = acpu_freq_tbl; l->speed.khz != 0; l++)
                if (l->use_for_scaling)
                        max_acpu_level = l;
        BUG_ON(!max_acpu_level);
        pr_info("Max ACPU freq: %u KHz\n", max_acpu_level->speed.khz);

        return max_acpu_level;
}

static struct acpuclk_data acpuclk_8960_data = {
        .set_rate = acpuclk_8960_set_rate,
        .get_rate = acpuclk_8960_get_rate,
        .power_collapse_khz = STBY_KHZ,
        .wait_for_irq_khz = STBY_KHZ,
};

static int __init acpuclk_8960_init(struct acpuclk_soc_data *soc_data)
{
        struct acpu_level *max_acpu_level = select_freq_plan();

        init_clock_sources(&scalable[L2], &max_acpu_level->l2_level->speed);
        on_each_cpu(per_cpu_init, max_acpu_level, true);

        regulator_init();
        bus_init();
        cpufreq_table_init();

        acpuclk_register(&acpuclk_8960_data);
        register_hotcpu_notifier(&acpuclock_cpu_notifier);

        return 0;
}

struct acpuclk_soc_data acpuclk_8960_soc_data __initdata = {
        .init = acpuclk_8960_init,
};