blob: 24b81b983ce497f14d8d6c9222067a1a6034b3e4 [file] [log] [blame]
Matt Wagantall44f672e2011-09-07 20:31:16 -07001/*
Vikram Mulukutla01d06b82012-01-10 14:19:44 -08002 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Matt Wagantall44f672e2011-09-07 20:31:16 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/io.h>
19#include <linux/delay.h>
20#include <linux/mutex.h>
21#include <linux/spinlock.h>
22#include <linux/errno.h>
23#include <linux/cpufreq.h>
24#include <linux/clk.h>
25
26#include <asm/cpu.h>
27
28#include <mach/board.h>
29#include <mach/msm_iomap.h>
Matt Wagantall095760f2011-10-18 15:21:36 -070030#include <mach/msm_bus.h>
31#include <mach/msm_bus_board.h>
Matt Wagantallb1be5662011-10-18 13:28:58 -070032#include <mach/rpm-regulator.h>
Matt Wagantall44f672e2011-09-07 20:31:16 -070033
34#include "acpuclock.h"
35
/*
 * APCS global clock-control registers. There are two banks of
 * source-select/divider registers; REG_CLKOUTSEL picks which bank
 * drives the CPU clock (see select_clk_source_div()).
 */
#define REG_CLKSEL_0 (MSM_APCS_GLB_BASE + 0x08)
#define REG_CLKDIV_0 (MSM_APCS_GLB_BASE + 0x0C)
#define REG_CLKSEL_1 (MSM_APCS_GLB_BASE + 0x10)
#define REG_CLKDIV_1 (MSM_APCS_GLB_BASE + 0x14)
#define REG_CLKOUTSEL (MSM_APCS_GLB_BASE + 0x18)

/* Maximum vdd_mem vote passed to rpm_vreg_set_voltage() (presumably uV,
 * matching the 1050000/1150000 values in acpu_freq_tbl — confirm). */
#define MAX_VDD_MEM 1150000
43
/* Selectable ACPU clock sources; used as indices into clocks[] below. */
enum clk_src {
	SRC_CXO,	/* 19.2 MHz XO (per acpu_freq_tbl); no clk handle. */
	SRC_PLL0,
	SRC_PLL8,
	SRC_PLL9,
	NUM_SRC,
};
51
/* A clock source paired with the name used to look it up via clk_get_sys(). */
struct src_clock {
	struct clk *clk;	/* Resolved in acpuclk_9615_init(); NULL before. */
	const char *name;	/* clk_get_sys() connection id; NULL for CXO. */
};
56
/*
 * Source-clock lookup table, indexed by enum clk_src. Only entries with a
 * name are looked up and prepared at init; SRC_CXO intentionally has none.
 */
static struct src_clock clocks[NUM_SRC] = {
	[SRC_PLL0].name = "pll0",
	[SRC_PLL8].name = "pll8",
	[SRC_PLL9].name = "pll9",
};
62
/* One row of the ACPU frequency table. */
struct clkctl_acpu_speed {
	bool use_for_scaling;	/* Expose this rate to cpufreq. */
	unsigned int khz;	/* CPU rate in KHz; 0 terminates the table. */
	int src;		/* Source clock (enum clk_src, index into clocks[]). */
	unsigned int src_sel;	/* Raw value written to REG_CLKSEL_*. */
	unsigned int src_div;	/* Raw value written to REG_CLKDIV_*. */
	unsigned int vdd_cpu;	/* CPU voltage vote (RPM_VREG_CORNER_* value). */
	unsigned int vdd_mem;	/* Memory voltage vote (presumably uV — confirm). */
	unsigned int bw_level;	/* Index into bw_level_tbl[]. */
};
73
/* Driver state shared by the set_rate/get_rate callbacks. */
struct acpuclk_state {
	struct mutex lock;	/* Serializes SETRATE_CPUFREQ rate changes. */
	struct clkctl_acpu_speed *current_speed;	/* Row currently in effect. */
};
78
/*
 * Start with a dummy all-zero speed so the boot-time set_rate call sees a
 * valid current_speed; khz == 0 never matches a real requested rate.
 */
static struct acpuclk_state drv_state = {
	.current_speed = &(struct clkctl_acpu_speed){ 0 },
};
82
/*
 * Instantaneous bandwidth requests in MB/s.
 *
 * Expands to a single-path msm_bus_paths entry voting an instantaneous
 * bandwidth (ib) of _bw MB/s from the CPU master to the EBI slave; no
 * average-bandwidth (ab) vote is made.
 */
#define BW_MBPS(_bw) \
	{ \
		.vectors = &(struct msm_bus_vectors){ \
			.src = MSM_BUS_MASTER_AMPSS_M0, \
			.dst = MSM_BUS_SLAVE_EBI_CH0, \
			.ib = (_bw) * 1000000UL, \
			.ab = 0, \
		}, \
		.num_paths = 1, \
	}
/* Bus bandwidth levels, indexed by clkctl_acpu_speed.bw_level. */
static struct msm_bus_paths bw_level_tbl[] = {
	[0] = BW_MBPS(152), /* At least 19 MHz on bus. */
	[1] = BW_MBPS(368), /* At least 46 MHz on bus. */
	[2] = BW_MBPS(552), /* At least 69 MHz on bus. */
	[3] = BW_MBPS(736), /* At least 92 MHz on bus. */
	[4] = BW_MBPS(1064), /* At least 133 MHz on bus. */
	[5] = BW_MBPS(1536), /* At least 192 MHz on bus. */
};
102
/* Bus-scaling client description; registered in acpuclk_9615_init(). */
static struct msm_bus_scale_pdata bus_client_pdata = {
	.usecase = bw_level_tbl,
	.num_usecases = ARRAY_SIZE(bw_level_tbl),
	.active_only = 1,	/* NOTE(review): presumably drops the vote while
				 * the CPU is inactive — confirm with bus driver. */
	.name = "acpuclock",
};
109
/* Handle from msm_bus_scale_register_client(); 0 (invalid) until init. */
static uint32_t bus_perf_client;
111
/*
 * ACPU frequency table, terminated by a khz == 0 row. vdd_cpu values are
 * RPM voltage corners; vdd_mem values are raw votes (presumably uV). The
 * PLL9 row is rewritten at init when PLL9 runs at 550 MHz (see
 * acpuclk_9615_init()).
 */
static struct clkctl_acpu_speed acpu_freq_tbl[] = {
	{ 0,  19200, SRC_CXO,  0, 0, RPM_VREG_CORNER_LOW,     1050000, 0 },
	{ 1, 138000, SRC_PLL0, 6, 1, RPM_VREG_CORNER_LOW,     1050000, 2 },
	{ 1, 276000, SRC_PLL0, 6, 0, RPM_VREG_CORNER_NOMINAL, 1050000, 2 },
	{ 1, 384000, SRC_PLL8, 3, 0, RPM_VREG_CORNER_HIGH,    1150000, 4 },
	/* The row below may be changed at runtime depending on hw rev. */
	{ 1, 440000, SRC_PLL9, 2, 0, RPM_VREG_CORNER_HIGH,    1150000, 4 },
	{ 0 }
};
121
/*
 * Switch the CPU clock mux to the source/divider described by @s.
 *
 * Bit 0 of REG_CLKOUTSEL selects which of the two SEL/DIV register banks
 * drives the CPU. The inactive bank is programmed first, then REG_CLKOUTSEL
 * is flipped to it, so the in-use bank is never modified while selected
 * (NOTE(review): glitch-free switching is implied by the two-bank design —
 * confirm against APCS GLB hardware documentation).
 */
static void select_clk_source_div(struct clkctl_acpu_speed *s)
{
	static void * __iomem const sel_reg[] = {REG_CLKSEL_0, REG_CLKSEL_1};
	static void * __iomem const div_reg[] = {REG_CLKDIV_0, REG_CLKDIV_1};
	uint32_t next_bank;

	next_bank = !(readl_relaxed(REG_CLKOUTSEL) & 1);
	writel_relaxed(s->src_sel, sel_reg[next_bank]);
	writel_relaxed(s->src_div, div_reg[next_bank]);
	writel_relaxed(next_bank, REG_CLKOUTSEL);

	/* Wait for switch to complete. */
	mb();
	udelay(1);
}
137
Matt Wagantall095760f2011-10-18 15:21:36 -0700138/* Update the bus bandwidth request. */
139static void set_bus_bw(unsigned int bw)
140{
141 int ret;
142
143 /* Bounds check. */
144 if (bw >= ARRAY_SIZE(bw_level_tbl)) {
145 pr_err("invalid bandwidth request (%d)\n", bw);
146 return;
147 }
148
149 /* Update bandwidth if request has changed. This may sleep. */
150 ret = msm_bus_scale_client_update_request(bus_perf_client, bw);
151 if (ret)
152 pr_err("bandwidth request failed (%d)\n", ret);
153
154 return;
155}
156
/*
 * Apply any per-cpu voltage increases.
 *
 * Raises the vdd_mem vote before the vdd_cpu vote because vdd_mem must stay
 * >= vdd_cpu at all times. Returns 0 on success or the
 * rpm_vreg_set_voltage() error code; if the vdd_cpu step fails, the
 * already-raised vdd_mem vote is left in place (harmless, just higher).
 */
static int increase_vdd(unsigned int vdd_cpu, unsigned int vdd_mem)
{
	int rc = 0;

	/*
	 * Increase vdd_mem active-set before vdd_cpu.
	 * vdd_mem should be >= vdd_cpu.
	 */
	rc = rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_L9, RPM_VREG_VOTER1,
				  vdd_mem, MAX_VDD_MEM, 0);
	if (rc) {
		pr_err("vdd_mem increase failed (%d)\n", rc);
		return rc;
	}

	rc = rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_VDD_DIG_CORNER,
				  RPM_VREG_VOTER1, vdd_cpu, RPM_VREG_CORNER_HIGH, 0);
	if (rc)
		pr_err("vdd_cpu increase failed (%d)\n", rc);

	return rc;
}
180
/*
 * Apply any per-cpu voltage decreases.
 *
 * Lowers the vdd_cpu vote before the vdd_mem vote so vdd_mem never drops
 * below vdd_cpu. Failures are logged but not propagated; if the vdd_cpu
 * step fails, the vdd_mem vote is deliberately left untouched.
 */
static void decrease_vdd(unsigned int vdd_cpu, unsigned int vdd_mem)
{
	int ret;

	/* Update CPU voltage. */
	ret = rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_VDD_DIG_CORNER,
				   RPM_VREG_VOTER1, vdd_cpu, RPM_VREG_CORNER_HIGH, 0);

	if (ret) {
		pr_err("vdd_cpu decrease failed (%d)\n", ret);
		return;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_cpu.
	 * vdd_mem should be >= vdd_cpu.
	 */
	ret = rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_L9, RPM_VREG_VOTER1,
				  vdd_mem, MAX_VDD_MEM, 0);
	if (ret)
		pr_err("vdd_mem decrease failed (%d)\n", ret);
}
204
/*
 * Set the ACPU clock to @rate (KHz) on behalf of @cpu.
 *
 * For SETRATE_CPUFREQ the driver mutex is held and the voltage and bus
 * bandwidth votes are updated around the switch (raise voltage before
 * speeding up, lower it after slowing down). SETRATE_SWFI/SETRATE_PC only
 * flip the clock mux: those paths run in atomic context (see the
 * clk_prepare() comment in acpuclk_9615_init()), so no sleeping votes are
 * made and the early "goto out" skips them.
 *
 * Returns 0 on success, -EINVAL if @rate is not in acpu_freq_tbl, or the
 * increase_vdd() error code.
 */
static int acpuclk_9615_set_rate(int cpu, unsigned long rate,
				 enum setrate_reason reason)
{
	struct clkctl_acpu_speed *tgt_s, *strt_s;
	int rc = 0;

	if (reason == SETRATE_CPUFREQ)
		mutex_lock(&drv_state.lock);

	strt_s = drv_state.current_speed;

	/* Return early if rate didn't change. */
	if (rate == strt_s->khz)
		goto out;

	/* Find target frequency. */
	for (tgt_s = acpu_freq_tbl; tgt_s->khz != 0; tgt_s++)
		if (tgt_s->khz == rate)
			break;
	if (tgt_s->khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Increase VDD levels if needed. */
	if ((reason == SETRATE_CPUFREQ || reason == SETRATE_INIT)
			&& (tgt_s->khz > strt_s->khz)) {
		rc = increase_vdd(tgt_s->vdd_cpu, tgt_s->vdd_mem);
		if (rc)
			goto out;
	}

	pr_debug("Switching from CPU rate %u KHz -> %u KHz\n",
		strt_s->khz, tgt_s->khz);

	/* Switch CPU speed: enable the new source before dropping the old. */
	clk_enable(clocks[tgt_s->src].clk);
	select_clk_source_div(tgt_s);
	clk_disable(clocks[strt_s->src].clk);

	drv_state.current_speed = tgt_s;
	pr_debug("CPU speed change complete\n");

	/* Nothing else to do for SWFI or power-collapse. */
	if (reason == SETRATE_SWFI || reason == SETRATE_PC)
		goto out;

	/* Update bus bandwidth request. */
	set_bus_bw(tgt_s->bw_level);

	/* Drop VDD levels if we can. */
	if (tgt_s->khz < strt_s->khz)
		decrease_vdd(tgt_s->vdd_cpu, tgt_s->vdd_mem);

out:
	if (reason == SETRATE_CPUFREQ)
		mutex_unlock(&drv_state.lock);
	return rc;
}
264
265static unsigned long acpuclk_9615_get_rate(int cpu)
266{
267 return drv_state.current_speed->khz;
268}
269
270#ifdef CONFIG_CPU_FREQ_MSM
271static struct cpufreq_frequency_table freq_table[30];
272
273static void __init cpufreq_table_init(void)
274{
275 int i, freq_cnt = 0;
276
277 /* Construct the freq_table tables from acpu_freq_tbl. */
278 for (i = 0; acpu_freq_tbl[i].khz != 0
279 && freq_cnt < ARRAY_SIZE(freq_table); i++) {
280 if (acpu_freq_tbl[i].use_for_scaling) {
281 freq_table[freq_cnt].index = freq_cnt;
282 freq_table[freq_cnt].frequency
283 = acpu_freq_tbl[i].khz;
284 freq_cnt++;
285 }
286 }
287 /* freq_table not big enough to store all usable freqs. */
288 BUG_ON(acpu_freq_tbl[i].khz != 0);
289
290 freq_table[freq_cnt].index = freq_cnt;
291 freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;
292
293 pr_info("CPU: %d scaling frequencies supported.\n", freq_cnt);
294
295 /* Register table with CPUFreq. */
296 cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
297}
298#else
299static void __init cpufreq_table_init(void) {}
300#endif
301
/* Callbacks and idle-state rates registered with the acpuclk core. */
static struct acpuclk_data acpuclk_9615_data = {
	.set_rate = acpuclk_9615_set_rate,
	.get_rate = acpuclk_9615_get_rate,
	.power_collapse_khz = 19200,	/* CXO rate (first acpu_freq_tbl row). */
	.wait_for_irq_khz = 19200,
};
308
/*
 * Probe-time setup: register the bus-scaling client, resolve and prepare
 * the source clocks, fix up the PLL9 table row for the detected PLL rate,
 * ramp the CPU to the fastest table rate, then register with the acpuclk
 * core and cpufreq. Always returns 0; unrecoverable failures BUG().
 */
static int __init acpuclk_9615_init(struct acpuclk_soc_data *soc_data)
{
	unsigned long max_cpu_khz = 0;
	int i;

	mutex_init(&drv_state.lock);

	bus_perf_client = msm_bus_scale_register_client(&bus_client_pdata);
	if (!bus_perf_client) {
		pr_err("Unable to register bus client\n");
		BUG();
	}

	for (i = 0; i < NUM_SRC; i++) {
		if (clocks[i].name) {
			clocks[i].clk = clk_get_sys("acpu", clocks[i].name);
			BUG_ON(IS_ERR(clocks[i].clk));
			/*
			 * Prepare the PLLs because we enable/disable them
			 * in atomic context during power collapse/restore.
			 */
			BUG_ON(clk_prepare(clocks[i].clk));
		}
	}

	/*
	 * Determine the rate of PLL9 and fixup tables accordingly.
	 * The scan includes the zero terminator, which is harmless: its
	 * src is 0 (SRC_CXO), so the SRC_PLL9 match never hits it.
	 */
	if (clk_get_rate(clocks[SRC_PLL9].clk) == 550000000) {
		for (i = 0; i < ARRAY_SIZE(acpu_freq_tbl); i++)
			if (acpu_freq_tbl[i].src == SRC_PLL9) {
				acpu_freq_tbl[i].khz = 550000;
				acpu_freq_tbl[i].bw_level = 5;
			}
	}

	/* Improve boot time by ramping up CPU immediately. */
	for (i = 0; acpu_freq_tbl[i].khz != 0; i++)
		max_cpu_khz = acpu_freq_tbl[i].khz;
	acpuclk_9615_set_rate(smp_processor_id(), max_cpu_khz, SETRATE_INIT);

	acpuclk_register(&acpuclk_9615_data);
	cpufreq_table_init();

	return 0;
}
353
/* SoC descriptor consumed by the platform's acpuclk probe (init-time only). */
struct acpuclk_soc_data acpuclk_9615_soc_data __initdata = {
	.init = acpuclk_9615_init,
};