/*
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * CPU frequency scaling for S5PC110/S5PV210
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>

#include <mach/map.h>
#include <mach/regs-clock.h>

static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
static struct cpufreq_freqs freqs;

/* APLL M,P,S values for 1GHz/800MHz */
#define APLL_VAL_1000	((1 << 31) | (125 << 16) | (3 << 8) | 1)
#define APLL_VAL_800	((1 << 31) | (100 << 16) | (3 << 8) | 1)
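/*
 * A sketch of the encoding, assuming the usual S5PV210 APLL_CON layout
 * (ENABLE at bit 31, MDIV at [25:16], PDIV at [13:8], SDIV at [2:0]) and
 * FOUT = MDIV * FIN / (PDIV * 2^(SDIV - 1)) with FIN = 24 MHz:
 * 125 * 24 / 3 = 1000 MHz and 100 * 24 / 3 = 800 MHz, respectively.
 */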

/*
 * relation has additional semantics beyond the standard cpufreq relations:
 * DISABLE_FURTHER_CPUFREQ: disable further access to target
 * ENABLE_FURTHER_CPUFREQ: enable access to target
 */
enum cpufreq_access {
	DISABLE_FURTHER_CPUFREQ = 0x10,
	ENABLE_FURTHER_CPUFREQ = 0x20,
};

static bool no_cpufreq_access;

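/*
 * Hypothetical caller sketch (not part of this driver): platform PM code
 * could pin the frequency before suspend by OR-ing the flag into the
 * relation passed to the core, e.g.
 *
 *	cpufreq_driver_target(policy, sleep_freq,
 *			      DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
 *
 * and undo it on resume with ENABLE_FURTHER_CPUFREQ.
 */
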
/*
 * DRAM configuration used to calculate the refresh counter when changing
 * the memory frequency.
 */
struct dram_conf {
	unsigned long freq;	/* Hz */
	unsigned long refresh;	/* DRAM refresh counter * 1000 */
};

/* DRAM configuration (DMC0 and DMC1) */
static struct dram_conf s5pv210_dram_conf[2];

enum perf_level {
	L0, L1, L2, L3, L4,
};

enum s5pv210_mem_type {
	LPDDR	= 0x1,
	LPDDR2	= 0x2,
	DDR2	= 0x4,
};

enum s5pv210_dmc_port {
	DMC0 = 0,
	DMC1,
};

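/*
 * Operating points of this driver; .frequency entries are in kHz as the
 * cpufreq core expects, so 1000*1000 kHz is the 1 GHz level L0.
 */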
static struct cpufreq_frequency_table s5pv210_freq_table[] = {
	{L0, 1000*1000},
	{L1, 800*1000},
	{L2, 400*1000},
	{L3, 200*1000},
	{L4, 100*1000},
	{0, CPUFREQ_TABLE_END},
};

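/*
 * Reading guide for the table below (inferred from the row comments and
 * the APLL_VAL_* usage above): only L0 raises the APLL to 1000 MHz, the
 * other levels keep it at 800 MHz and divide it down for ARMCLK, e.g.
 * L2 uses APLL divider 1 -> 800 / (1 + 1) = 400 MHz.
 */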
static u32 clkdiv_val[5][11] = {
	/*
	 * Clock divider values for the following clocks:
	 * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
	 *   HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
	 *   ONEDRAM, MFC, G3D }
	 */

	/* L0 : [1000/200/100][166/83][133/66][200/200] */
	{0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L1 : [800/200/100][166/83][133/66][200/200] */
	{0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L2 : [400/200/100][166/83][133/66][200/200] */
	{1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L3 : [200/200/100][166/83][133/66][200/200] */
	{3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L4 : [100/100/100][83/83][66/66][100/100] */
	{7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
};

/*
 * This function sets the DRAM refresh counter
 * according to the operating frequency of the DRAM.
 * ch: DMC port number, 0 or 1
 * freq: operating frequency of the DRAM (kHz)
 */
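/*
 * Worked example (using the 7.8us refresh period quoted elsewhere in this
 * file): the boot-time counter saved in s5pv210_dram_conf is scaled by
 * new_freq / boot_freq, which keeps the refresh period constant, e.g.
 * 7.8us * 83MHz ~= 647 = 0x287.
 */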
static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
{
	unsigned long tmp, tmp1;
	void __iomem *reg = NULL;

	if (ch == DMC0) {
		reg = (S5P_VA_DMC0 + 0x30);
	} else if (ch == DMC1) {
		reg = (S5P_VA_DMC1 + 0x30);
	} else {
		printk(KERN_ERR "Cannot find DMC port\n");
		return;
	}

	/* Find current DRAM frequency */
	tmp = s5pv210_dram_conf[ch].freq;

	do_div(tmp, freq);

	tmp1 = s5pv210_dram_conf[ch].refresh;

	do_div(tmp1, tmp);

	__raw_writel(tmp1, reg);
}

int s5pv210_verify_speed(struct cpufreq_policy *policy)
{
	if (policy->cpu)
		return -EINVAL;

	return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
}

unsigned int s5pv210_getspeed(unsigned int cpu)
{
	if (cpu)
		return 0;

	return clk_get_rate(cpu_clk) / 1000;
}

static int s5pv210_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	unsigned long reg;
	unsigned int index, priv_index;
	unsigned int pll_changing = 0;
	unsigned int bus_speed_changing = 0;

	if (relation & ENABLE_FURTHER_CPUFREQ)
		no_cpufreq_access = false;

	if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
		pr_err("%s:%d denied access to %s as it is disabled "
		       "temporarily\n", __FILE__, __LINE__, __func__);
#endif
		return -EINVAL;
	}

	if (relation & DISABLE_FURTHER_CPUFREQ)
		no_cpufreq_access = true;

	relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);

	freqs.old = s5pv210_getspeed(0);

	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
					   target_freq, relation, &index))
		return -EINVAL;

	freqs.new = s5pv210_freq_table[index].frequency;
	freqs.cpu = 0;

	if (freqs.new == freqs.old)
		return 0;

	/* Find the currently running level index */
	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
					   freqs.old, relation, &priv_index))
		return -EINVAL;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	if (freqs.new > freqs.old) {
		/* Voltage up: will be implemented */
	}

	/* Check whether the PLL needs to be changed */
	if ((index == L0) || (priv_index == L0))
		pll_changing = 1;

	/* Check whether the system bus clock needs to be changed */
	if ((index == L4) || (priv_index == L4))
		bus_speed_changing = 1;

	if (bus_speed_changing) {
		/*
		 * Reconfigure the DRAM refresh counter value for the minimum
		 * temporary clock while changing the divider.
		 * Expected clock is 83MHz: 7.8usec * 83MHz = 0x287
		 */
		if (pll_changing)
			s5pv210_set_refresh(DMC1, 83000);
		else
			s5pv210_set_refresh(DMC1, 100000);

		s5pv210_set_refresh(DMC0, 83000);
	}

	/*
	 * The APLL has to be changed at this level:
	 * APLL -> MPLL (for a stable transition) -> APLL
	 * Some clock sources' clock APIs are not prepared,
	 * so do not use the clock API in the code below.
	 */
	if (pll_changing) {
		/*
		 * 1. Temporarily change the divider for MFC and G3D
		 * SCLKA2M(200/1=200)->(200/4=50)MHz
		 */
		reg = __raw_readl(S5P_CLK_DIV2);
		reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
		reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
			(3 << S5P_CLKDIV2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV2);

		/* Wait for the MFC, G3D dividers to settle */
		do {
			reg = __raw_readl(S5P_CLKDIV_STAT0);
		} while (reg & ((1 << 16) | (1 << 17)));

		/*
		 * 2. Change SCLKA2M(200MHz) to SCLKMPLL in MFC_MUX, G3D MUX
		 * (200/4=50)->(667/4=166)MHz
		 */
		reg = __raw_readl(S5P_CLK_SRC2);
		reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
		reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
			(1 << S5P_CLKSRC2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC2);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT1);
		} while (reg & ((1 << 7) | (1 << 3)));

		/*
		 * 3. DMC1 refresh count for 133MHz; if (index == L4) is
		 * true, the refresh counter has already been programmed
		 * above (0x287 @ 83MHz).
		 */
		if (!bus_speed_changing)
			s5pv210_set_refresh(DMC1, 133000);

		/* 4. SCLKAPLL -> SCLKMPLL */
		reg = __raw_readl(S5P_CLK_SRC0);
		reg &= ~(S5P_CLKSRC0_MUX200_MASK);
		reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC0);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT0);
		} while (reg & (0x1 << 18));

	}

	/* Change divider */
	reg = __raw_readl(S5P_CLK_DIV0);

	reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
		S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
		S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
		S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);

	reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
		(clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
		(clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
		(clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
		(clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
		(clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
		(clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
		(clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));

	__raw_writel(reg, S5P_CLK_DIV0);

	do {
		reg = __raw_readl(S5P_CLKDIV_STAT0);
	} while (reg & 0xff);

	/* Change the ARM MCS value */
	reg = __raw_readl(S5P_ARM_MCS_CON);
	reg &= ~0x3;
	if (index >= L3)
		reg |= 0x3;
	else
		reg |= 0x1;

	__raw_writel(reg, S5P_ARM_MCS_CON);

	if (pll_changing) {
		/* 5. Set lock time = 30us * 24MHz = 0x2cf */
		__raw_writel(0x2cf, S5P_APLL_LOCK);

		/*
		 * 6. Turn on APLL
		 * 6-1. Set PMS values
		 * 6-2. Wait until the PLL is locked
		 */
		if (index == L0)
			__raw_writel(APLL_VAL_1000, S5P_APLL_CON);
		else
			__raw_writel(APLL_VAL_800, S5P_APLL_CON);

		do {
			reg = __raw_readl(S5P_APLL_CON);
		} while (!(reg & (0x1 << 29)));

		/*
		 * 7. Change the source clock from SCLKMPLL(667MHz)
		 * to SCLKA2M(200MHz) in MFC_MUX and G3D MUX
		 * (667/4=166)->(200/4=50)MHz
		 */
		reg = __raw_readl(S5P_CLK_SRC2);
		reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
		reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
			(0 << S5P_CLKSRC2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC2);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT1);
		} while (reg & ((1 << 7) | (1 << 3)));

		/*
		 * 8. Change the divider for MFC and G3D
		 * (200/4=50)->(200/1=200)MHz
		 */
		reg = __raw_readl(S5P_CLK_DIV2);
		reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
		reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
			(clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV2);

		/* Wait for the MFC, G3D dividers to settle */
		do {
			reg = __raw_readl(S5P_CLKDIV_STAT0);
		} while (reg & ((1 << 16) | (1 << 17)));

		/* 9. Change MPLL to APLL in MSYS_MUX */
		reg = __raw_readl(S5P_CLK_SRC0);
		reg &= ~(S5P_CLKSRC0_MUX200_MASK);
		reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC0);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT0);
		} while (reg & (0x1 << 18));

		/*
		 * 10. DMC1 refresh counter
		 * L4 : DMC1 = 100MHz, 7.8us * 100MHz = 0x30c
		 * Others : DMC1 = 200MHz, 7.8us * 200MHz = 0x618
		 */
		if (!bus_speed_changing)
			s5pv210_set_refresh(DMC1, 200000);
	}

	/*
	 * The L4 level needs to change the memory bus speed, hence the
	 * onedram clock divider and the memory refresh parameter should
	 * be changed.
	 */
	if (bus_speed_changing) {
		reg = __raw_readl(S5P_CLK_DIV6);
		reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
		reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV6);

		do {
			reg = __raw_readl(S5P_CLKDIV_STAT1);
		} while (reg & (1 << 15));

		/* Reconfigure DRAM refresh counter value */
		if (index != L4) {
			/*
			 * DMC0 : 166MHz
			 * DMC1 : 200MHz
			 */
			s5pv210_set_refresh(DMC0, 166000);
			s5pv210_set_refresh(DMC1, 200000);
		} else {
			/*
			 * DMC0 : 83MHz
			 * DMC1 : 100MHz
			 */
			s5pv210_set_refresh(DMC0, 83000);
			s5pv210_set_refresh(DMC1, 100000);
		}
	}

	if (freqs.new < freqs.old) {
		/* Voltage down: will be implemented */
	}

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	printk(KERN_DEBUG "Perf changed[L%d]\n", index);

	return 0;
}

#ifdef CONFIG_PM
static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
{
	return 0;
}

static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
{
	return 0;
}
#endif

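/*
 * Read the memory type from the DMC: offset 0x4 and the [11:8] field
 * follow from the mask and shift used below, and the result is compared
 * against the s5pv210_mem_type values (LPDDR/LPDDR2/DDR2) at init time.
 */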
static int check_mem_type(void __iomem *dmc_reg)
{
	unsigned long val;

	val = __raw_readl(dmc_reg + 0x4);
	val = (val & (0xf << 8));

	return val >> 8;
}

static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
{
	unsigned long mem_type;
	int ret;

	cpu_clk = clk_get(NULL, "armclk");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	dmc0_clk = clk_get(NULL, "sclk_dmc0");
	if (IS_ERR(dmc0_clk)) {
		ret = PTR_ERR(dmc0_clk);
		goto out_dmc0;
	}

	dmc1_clk = clk_get(NULL, "hclk_msys");
	if (IS_ERR(dmc1_clk)) {
		ret = PTR_ERR(dmc1_clk);
		goto out_dmc1;
	}

	if (policy->cpu != 0) {
		ret = -EINVAL;
		goto out_dmc1;
	}

	/*
	 * check_mem_type : This driver only supports LPDDR & LPDDR2.
	 * Other memory types are not supported.
	 */
	mem_type = check_mem_type(S5P_VA_DMC0);

	if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
		printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
		ret = -EINVAL;
		goto out_dmc1;
	}

	/* Find the current refresh counter and frequency of each DMC */
	s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
	s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);

	s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
	s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);

	policy->cur = policy->min = policy->max = s5pv210_getspeed(0);

	cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);

	policy->cpuinfo.transition_latency = 40000;

	return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);

out_dmc1:
	clk_put(dmc0_clk);
out_dmc0:
	clk_put(cpu_clk);
	return ret;
}

static struct cpufreq_driver s5pv210_driver = {
	.flags		= CPUFREQ_STICKY,
	.verify		= s5pv210_verify_speed,
	.target		= s5pv210_target,
	.get		= s5pv210_getspeed,
	.init		= s5pv210_cpu_init,
	.name		= "s5pv210",
#ifdef CONFIG_PM
	.suspend	= s5pv210_cpufreq_suspend,
	.resume		= s5pv210_cpufreq_resume,
#endif
};

static int __init s5pv210_cpufreq_init(void)
{
	return cpufreq_register_driver(&s5pv210_driver);
}

late_initcall(s5pv210_cpufreq_init);