/*
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>

#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/rpm-regulator.h>

#include "acpuclock.h"

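/*
 * APCS global registers: two source-select/divider banks for the CPU
 * clock MUX, plus the register that selects which bank drives the output.
 */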
#define REG_CLKSEL_0 (MSM_APCS_GLB_BASE + 0x08)
#define REG_CLKDIV_0 (MSM_APCS_GLB_BASE + 0x0C)
#define REG_CLKSEL_1 (MSM_APCS_GLB_BASE + 0x10)
#define REG_CLKDIV_1 (MSM_APCS_GLB_BASE + 0x14)
#define REG_CLKOUTSEL (MSM_APCS_GLB_BASE + 0x18)

#define MAX_VDD_CPU 1150000
#define MAX_VDD_MEM 1150000

enum clk_src {
        SRC_CXO,
        SRC_PLL0,
        SRC_PLL8,
        SRC_PLL9,
        NUM_SRC,
};

struct src_clock {
        struct clk *clk;
        const char *name;
};

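/* Clock handles for the selectable sources; named entries are acquired at init. */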
static struct src_clock clocks[NUM_SRC] = {
        [SRC_PLL0].name = "pll0",
        [SRC_PLL8].name = "pll8",
        [SRC_PLL9].name = "pll9",
};

struct clkctl_acpu_speed {
        bool use_for_scaling;
        unsigned int khz;
        int src;
        unsigned int src_sel;
        unsigned int src_div;
        unsigned int vdd_cpu;
        unsigned int vdd_mem;
        unsigned int bw_level;
};

struct acpuclk_state {
        struct mutex lock;
        struct clkctl_acpu_speed *current_speed;
};

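/*
 * current_speed starts out pointing at a dummy 0 kHz entry so the first
 * acpuclk_9615_set_rate() call always performs a real switch.
 */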
static struct acpuclk_state drv_state = {
        .current_speed = &(struct clkctl_acpu_speed){ 0 },
};

/* Instantaneous bandwidth requests in MB/s. */
#define BW_MBPS(_bw) \
        { \
                .vectors = &(struct msm_bus_vectors){ \
                        .src = MSM_BUS_MASTER_AMPSS_M0, \
                        .dst = MSM_BUS_SLAVE_EBI_CH0, \
                        .ib = (_bw) * 1000000UL, \
                        .ab = 0, \
                }, \
                .num_paths = 1, \
        }
static struct msm_bus_paths bw_level_tbl[] = {
        [0] = BW_MBPS(152),  /* At least  19 MHz on bus. */
        [1] = BW_MBPS(368),  /* At least  46 MHz on bus. */
        [2] = BW_MBPS(552),  /* At least  69 MHz on bus. */
        [3] = BW_MBPS(736),  /* At least  92 MHz on bus. */
        [4] = BW_MBPS(1064), /* At least 133 MHz on bus. */
};

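/* Bus client that votes for CPU-to-memory bandwidth; active-set only. */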
static struct msm_bus_scale_pdata bus_client_pdata = {
        .usecase = bw_level_tbl,
        .num_usecases = ARRAY_SIZE(bw_level_tbl),
        .active_only = 1,
        .name = "acpuclock",
};

static uint32_t bus_perf_client;

static struct clkctl_acpu_speed acpu_freq_tbl[] = {
        { 0,  19200, SRC_CXO,  0, 0,  950000, 1050000, 0 },
        { 1, 138000, SRC_PLL0, 6, 1,  950000, 1050000, 2 },
        { 1, 276000, SRC_PLL0, 6, 0, 1050000, 1050000, 2 },
        { 1, 384000, SRC_PLL8, 3, 0, 1150000, 1150000, 4 },
        { 1, 440000, SRC_PLL9, 2, 0, 1150000, 1150000, 4 },
        { 0 }
};

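/*
 * Program the inactive source-select/divider bank, then flip CLKOUTSEL so
 * the CPU clock MUX switches over to it.
 */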
static void select_clk_source_div(struct clkctl_acpu_speed *s)
{
        static void * __iomem const sel_reg[] = {REG_CLKSEL_0, REG_CLKSEL_1};
        static void * __iomem const div_reg[] = {REG_CLKDIV_0, REG_CLKDIV_1};
        uint32_t next_bank;

        next_bank = !(readl_relaxed(REG_CLKOUTSEL) & 1);
        writel_relaxed(s->src_sel, sel_reg[next_bank]);
        writel_relaxed(s->src_div, div_reg[next_bank]);
        writel_relaxed(next_bank, REG_CLKOUTSEL);

        /* Wait for switch to complete. */
        mb();
        udelay(1);
}

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
        int ret;

        /* Bounds check. */
        if (bw >= ARRAY_SIZE(bw_level_tbl)) {
                pr_err("invalid bandwidth request (%d)\n", bw);
                return;
        }

        /* Update bandwidth if request has changed. This may sleep. */
        ret = msm_bus_scale_client_update_request(bus_perf_client, bw);
        if (ret)
                pr_err("bandwidth request failed (%d)\n", ret);

        return;
}

/* Apply any per-cpu voltage increases. */
static int increase_vdd(unsigned int vdd_cpu, unsigned int vdd_mem)
{
        int rc = 0;

        /*
         * Increase vdd_mem active-set before vdd_cpu.
         * vdd_mem should be >= vdd_cpu.
         */
        rc = rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_L9, RPM_VREG_VOTER1,
                                  vdd_mem, MAX_VDD_MEM, 0);
        if (rc) {
                pr_err("vdd_mem increase failed (%d)\n", rc);
                return rc;
        }

        rc = rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_S1, RPM_VREG_VOTER1,
                                  vdd_cpu, MAX_VDD_CPU, 0);
        if (rc)
                pr_err("vdd_cpu increase failed (%d)\n", rc);

        return rc;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(unsigned int vdd_cpu, unsigned int vdd_mem)
{
        int ret;

        /* Update CPU voltage. */
        ret = rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_S1, RPM_VREG_VOTER1,
                                   vdd_cpu, MAX_VDD_CPU, 0);
        if (ret) {
                pr_err("vdd_cpu decrease failed (%d)\n", ret);
                return;
        }

        /*
         * Decrease vdd_mem active-set after vdd_cpu.
         * vdd_mem should be >= vdd_cpu.
         */
        ret = rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_L9, RPM_VREG_VOTER1,
                                   vdd_mem, MAX_VDD_MEM, 0);
        if (ret)
                pr_err("vdd_mem decrease failed (%d)\n", ret);
}

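/*
 * Switch the CPU clock rate. Voltage and bus-bandwidth votes are only
 * adjusted for cpufreq- and init-driven changes; SWFI and power-collapse
 * transitions just switch the clock.
 */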
static int acpuclk_9615_set_rate(int cpu, unsigned long rate,
                                 enum setrate_reason reason)
{
        struct clkctl_acpu_speed *tgt_s, *strt_s;
        int rc = 0;

        if (reason == SETRATE_CPUFREQ)
                mutex_lock(&drv_state.lock);

        strt_s = drv_state.current_speed;

        /* Return early if rate didn't change. */
        if (rate == strt_s->khz)
                goto out;

        /* Find target frequency. */
        for (tgt_s = acpu_freq_tbl; tgt_s->khz != 0; tgt_s++)
                if (tgt_s->khz == rate)
                        break;
        if (tgt_s->khz == 0) {
                rc = -EINVAL;
                goto out;
        }

        /* Increase VDD levels if needed. */
        if ((reason == SETRATE_CPUFREQ || reason == SETRATE_INIT)
                        && (tgt_s->khz > strt_s->khz)) {
                rc = increase_vdd(tgt_s->vdd_cpu, tgt_s->vdd_mem);
                if (rc)
                        goto out;
        }

        pr_debug("Switching from CPU rate %u KHz -> %u KHz\n",
                 strt_s->khz, tgt_s->khz);

        /* Switch CPU speed. */
        clk_enable(clocks[tgt_s->src].clk);
        select_clk_source_div(tgt_s);
        clk_disable(clocks[strt_s->src].clk);

        drv_state.current_speed = tgt_s;
        pr_debug("CPU speed change complete\n");

        /* Nothing else to do for SWFI or power-collapse. */
        if (reason == SETRATE_SWFI || reason == SETRATE_PC)
                goto out;

        /* Update bus bandwidth request. */
        set_bus_bw(tgt_s->bw_level);

        /* Drop VDD levels if we can. */
        if (tgt_s->khz < strt_s->khz)
                decrease_vdd(tgt_s->vdd_cpu, tgt_s->vdd_mem);

out:
        if (reason == SETRATE_CPUFREQ)
                mutex_unlock(&drv_state.lock);
        return rc;
}

static unsigned long acpuclk_9615_get_rate(int cpu)
{
        return drv_state.current_speed->khz;
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[30];

static void __init cpufreq_table_init(void)
{
        int i, freq_cnt = 0;

        /* Construct the freq_table tables from acpu_freq_tbl. */
        for (i = 0; acpu_freq_tbl[i].khz != 0
                        && freq_cnt < ARRAY_SIZE(freq_table); i++) {
                if (acpu_freq_tbl[i].use_for_scaling) {
                        freq_table[freq_cnt].index = freq_cnt;
                        freq_table[freq_cnt].frequency
                                = acpu_freq_tbl[i].khz;
                        freq_cnt++;
                }
        }
        /* freq_table not big enough to store all usable freqs. */
        BUG_ON(acpu_freq_tbl[i].khz != 0);

        freq_table[freq_cnt].index = freq_cnt;
        freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;

        pr_info("CPU: %d scaling frequencies supported.\n", freq_cnt);

        /* Register table with CPUFreq. */
        cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
}
#else
static void __init cpufreq_table_init(void) {}
#endif

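/* SWFI and power collapse run the CPU at the 19.2 MHz CXO rate. */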
static struct acpuclk_data acpuclk_9615_data = {
        .set_rate = acpuclk_9615_set_rate,
        .get_rate = acpuclk_9615_get_rate,
        .power_collapse_khz = 19200,
        .wait_for_irq_khz = 19200,
};

static int __init acpuclk_9615_init(struct acpuclk_soc_data *soc_data)
{
        unsigned long max_cpu_khz = 0;
        int i;

        mutex_init(&drv_state.lock);

        bus_perf_client = msm_bus_scale_register_client(&bus_client_pdata);
        if (!bus_perf_client) {
                pr_err("Unable to register bus client\n");
                BUG();
        }

        for (i = 0; i < NUM_SRC; i++) {
                if (clocks[i].name) {
                        clocks[i].clk = clk_get_sys("acpu", clocks[i].name);
                        BUG_ON(IS_ERR(clocks[i].clk));
                }
        }

        /* Improve boot time by ramping up CPU immediately. */
        for (i = 0; acpu_freq_tbl[i].khz != 0; i++)
                max_cpu_khz = acpu_freq_tbl[i].khz;
        acpuclk_9615_set_rate(smp_processor_id(), max_cpu_khz, SETRATE_INIT);

        acpuclk_register(&acpuclk_9615_data);
        cpufreq_table_init();

        return 0;
}

struct acpuclk_soc_data acpuclk_9615_soc_data __initdata = {
        .init = acpuclk_9615_init,
};