blob: 05bd56ef4b6c5a70ed14b5fca92f0e264ec55aed [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/cpufreq.c
2 *
3 * MSM architecture cpufreq driver
4 *
5 * Copyright (C) 2007 Google, Inc.
Vikram Mulukutlabc2e9572011-11-04 03:41:38 -07006 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007 * Author: Mike A. Chan <mikechan@google.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/earlysuspend.h>
21#include <linux/init.h>
Praveen Chidambaram696a5612012-05-25 17:29:11 -060022#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070023#include <linux/cpufreq.h>
24#include <linux/workqueue.h>
25#include <linux/completion.h>
26#include <linux/cpu.h>
27#include <linux/cpumask.h>
28#include <linux/sched.h>
29#include <linux/suspend.h>
Stepan Moskovchenkoaf25dd92011-08-05 18:12:48 -070030#include <mach/socinfo.h>
Praveen Chidambaram696a5612012-05-25 17:29:11 -060031#include <mach/cpufreq.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33#include "acpuclock.h"
34
#ifdef CONFIG_SMP
/*
 * Per-cpu work item used to execute a frequency change on the target
 * cpu itself.  The requester fills in policy/frequency, queues the work
 * on the destination cpu and blocks on 'complete'; 'status' carries the
 * result of set_cpu_freq() back to the requester.
 */
struct cpufreq_work_struct {
	struct work_struct work;
	struct cpufreq_policy *policy;	/* policy the change applies to */
	struct completion complete;	/* signalled when the change is done */
	int frequency;			/* requested target frequency */
	int status;			/* result of set_cpu_freq() */
};

static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
static struct workqueue_struct *msm_cpufreq_wq;
#endif
47
/*
 * Per-cpu suspend gate: while 'device_suspended' is set (system
 * suspend/hibernate in progress, or the cpu on its way offline),
 * msm_cpufreq_target() rejects frequency change requests.
 */
struct cpufreq_suspend_t {
	struct mutex suspend_mutex;	/* serializes freq changes vs suspend */
	int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);
54
/*
 * Per-cpu frequency limit bookkeeping.  min/max span the cpu's full
 * frequency table; allowed_min/allowed_max is the currently permitted
 * sub-range set via msm_cpufreq_set_freq_limits().  limits_init flags
 * whether msm_cpufreq_limits_init() has populated this entry.
 */
struct cpu_freq {
	uint32_t max;
	uint32_t min;
	uint32_t allowed_max;
	uint32_t allowed_min;
	uint32_t limits_init;
};

static DEFINE_PER_CPU(struct cpu_freq, cpu_freq_info);
64
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
66{
67 int ret = 0;
68 struct cpufreq_freqs freqs;
Praveen Chidambaram696a5612012-05-25 17:29:11 -060069 struct cpu_freq *limit = &per_cpu(cpu_freq_info, policy->cpu);
70
71 if (limit->limits_init) {
72 if (new_freq > limit->allowed_max) {
73 new_freq = limit->allowed_max;
74 pr_debug("max: limiting freq to %d\n", new_freq);
75 }
76
77 if (new_freq < limit->allowed_min) {
78 new_freq = limit->allowed_min;
79 pr_debug("min: limiting freq to %d\n", new_freq);
80 }
81 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082
83 freqs.old = policy->cur;
David Ng3f76d272012-02-08 10:43:37 -080084 freqs.new = new_freq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085 freqs.cpu = policy->cpu;
86 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
87 ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ);
88 if (!ret)
89 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
90
91 return ret;
92}
93
94#ifdef CONFIG_SMP
Narayanan Gopalakrishnan4f5e7132012-07-17 16:07:50 -070095static int __cpuinit msm_cpufreq_cpu_callback(struct notifier_block *nfb,
96 unsigned long action, void *hcpu)
97{
98 unsigned int cpu = (unsigned long)hcpu;
99
100 switch (action) {
101 case CPU_ONLINE:
102 case CPU_ONLINE_FROZEN:
103 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
104 break;
105 case CPU_DOWN_PREPARE:
106 case CPU_DOWN_PREPARE_FROZEN:
107 mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
108 per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
109 mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
110 break;
111 case CPU_DOWN_FAILED:
112 case CPU_DOWN_FAILED_FROZEN:
113 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
114 break;
115 }
116 return NOTIFY_OK;
117}
118
/* Hotplug notifier; __refdata because the callback is __cpuinit. */
static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
	.notifier_call = msm_cpufreq_cpu_callback,
};
122
/*
 * Workqueue callback: runs on the target cpu, performs the frequency
 * change and wakes the requester waiting in msm_cpufreq_target().
 */
static void set_cpu_work(struct work_struct *work)
{
	struct cpufreq_work_struct *cpu_work =
		container_of(work, struct cpufreq_work_struct, work);

	cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
	complete(&cpu_work->complete);
}
131#endif
132
/*
 * cpufreq 'target' hook: switch policy->cpu to the table frequency
 * selected by target_freq/relation.
 *
 * On SMP the rate change must execute on the target cpu itself: unless
 * the caller is already affine to exactly that cpu, the request is
 * pushed onto the per-cpu workqueue and waited on synchronously.
 *
 * Returns 0 on success; -EFAULT when the cpu is suspended, -EINVAL for
 * a frequency not in the table, and (SMP only) -ENODEV for an inactive
 * cpu or -ENOMEM when the scratch cpumask cannot be allocated.
 */
static int msm_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	int ret = -EFAULT;
	int index;
	struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
	struct cpufreq_work_struct *cpu_work = NULL;
	cpumask_var_t mask;

	if (!cpu_active(policy->cpu)) {
		pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
		return -ENODEV;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
#endif

	/* Serializes against suspend/hotplug marking this cpu suspended. */
	mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);

	if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
		pr_debug("cpufreq: cpu%d scheduling frequency change "
				"in suspend.\n", policy->cpu);
		ret = -EFAULT;
		goto done;
	}

	table = cpufreq_frequency_get_table(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
			&index)) {
		pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
		ret = -EINVAL;
		goto done;
	}

#ifdef CONFIG_CPU_FREQ_DEBUG
	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
		policy->cpu, target_freq, relation,
		policy->min, policy->max, table[index].frequency);
#endif

#ifdef CONFIG_SMP
	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	cpu_work->policy = policy;
	cpu_work->frequency = table[index].frequency;
	cpu_work->status = -ENODEV;

	cpumask_clear(mask);
	cpumask_set_cpu(policy->cpu, mask);
	/* Already pinned to exactly the target cpu? Switch inline. */
	if (cpumask_equal(mask, &current->cpus_allowed)) {
		ret = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
		goto done;
	} else {
		/* Otherwise run the change on the target cpu and wait. */
		cancel_work_sync(&cpu_work->work);
		INIT_COMPLETION(cpu_work->complete);
		queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
		wait_for_completion(&cpu_work->complete);
	}

	ret = cpu_work->status;
#else
	ret = set_cpu_freq(policy, table[index].frequency);
#endif

done:
#ifdef CONFIG_SMP
	free_cpumask_var(mask);
#endif
	mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
	return ret;
}
206
207static int msm_cpufreq_verify(struct cpufreq_policy *policy)
208{
209 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
210 policy->cpuinfo.max_freq);
211 return 0;
212}
213
/* cpufreq 'get' hook: report the cpu's current rate from the acpu clock
 * driver. */
static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
{
	return acpuclk_get_rate(cpu);
}
218
219static inline int msm_cpufreq_limits_init(void)
220{
221 int cpu = 0;
222 int i = 0;
223 struct cpufreq_frequency_table *table = NULL;
224 uint32_t min = (uint32_t) -1;
225 uint32_t max = 0;
226 struct cpu_freq *limit = NULL;
227
228 for_each_possible_cpu(cpu) {
229 limit = &per_cpu(cpu_freq_info, cpu);
230 table = cpufreq_frequency_get_table(cpu);
231 if (table == NULL) {
232 pr_err("%s: error reading cpufreq table for cpu %d\n",
233 __func__, cpu);
234 continue;
235 }
236 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
237 if (table[i].frequency > max)
238 max = table[i].frequency;
239 if (table[i].frequency < min)
240 min = table[i].frequency;
241 }
242 limit->allowed_min = min;
243 limit->allowed_max = max;
244 limit->min = min;
245 limit->max = max;
246 limit->limits_init = 1;
247 }
248
249 return 0;
250}
251
252int msm_cpufreq_set_freq_limits(uint32_t cpu, uint32_t min, uint32_t max)
253{
254 struct cpu_freq *limit = &per_cpu(cpu_freq_info, cpu);
255
256 if (!limit->limits_init)
257 msm_cpufreq_limits_init();
258
259 if ((min != MSM_CPUFREQ_NO_LIMIT) &&
260 min >= limit->min && min <= limit->max)
261 limit->allowed_min = min;
262 else
263 limit->allowed_min = limit->min;
264
265
266 if ((max != MSM_CPUFREQ_NO_LIMIT) &&
267 max <= limit->max && max >= limit->min)
268 limit->allowed_max = max;
269 else
270 limit->allowed_max = limit->max;
271
272 pr_debug("%s: Limiting cpu %d min = %d, max = %d\n",
273 __func__, cpu,
274 limit->allowed_min, limit->allowed_max);
275
276 return 0;
277}
278EXPORT_SYMBOL(msm_cpufreq_set_freq_limits);
279
/*
 * cpufreq 'init' hook: validate the cpu's frequency table, snap the cpu
 * to a table frequency if it is currently between entries, and fill in
 * policy limits and transition latency.
 *
 * Returns 0 on success, -ENODEV without a table, -EINVAL when the
 * current rate matches no table entry, or the acpuclk_set_rate() error.
 */
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
	int cur_freq;
	int index;
	struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
	struct cpufreq_work_struct *cpu_work = NULL;
#endif


	table = cpufreq_frequency_get_table(policy->cpu);
	if (table == NULL)
		return -ENODEV;
	/*
	 * In 8625 both cpu core's frequency can not
	 * be changed independently. Each cpu is bound to
	 * same frequency. Hence set the cpumask to all cpu.
	 */
	if (cpu_is_msm8625())
		cpumask_setall(policy->cpus);

	if (cpufreq_frequency_table_cpuinfo(policy, table)) {
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
		/* Table rejected: fall back to the build-time min/max. */
		policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
		policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
#endif
	}
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
	policy->min = CONFIG_MSM_CPU_FREQ_MIN;
	policy->max = CONFIG_MSM_CPU_FREQ_MAX;
#endif

	/* Find a table index for the current rate, trying RELATION_H
	 * first and RELATION_L as a fallback. */
	cur_freq = acpuclk_get_rate(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, cur_freq,
	    CPUFREQ_RELATION_H, &index) &&
	    cpufreq_frequency_table_target(policy, table, cur_freq,
	    CPUFREQ_RELATION_L, &index)) {
		pr_info("cpufreq: cpu%d at invalid freq: %d\n",
				policy->cpu, cur_freq);
		return -EINVAL;
	}

	/* If the cpu is not exactly on a table entry, move it to one. */
	if (cur_freq != table[index].frequency) {
		int ret = 0;
		ret = acpuclk_set_rate(policy->cpu, table[index].frequency,
				SETRATE_CPUFREQ);
		if (ret)
			return ret;
		pr_info("cpufreq: cpu%d init at %d switching to %d\n",
				policy->cpu, cur_freq, table[index].frequency);
		cur_freq = table[index].frequency;
	}

	policy->cur = cur_freq;

	policy->cpuinfo.transition_latency =
		acpuclk_get_switch_time() * NSEC_PER_USEC;
#ifdef CONFIG_SMP
	/* Prepare the per-cpu work item used by msm_cpufreq_target(). */
	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	INIT_WORK(&cpu_work->work, set_cpu_work);
	init_completion(&cpu_work->complete);
#endif

	return 0;
}
345
346static int msm_cpufreq_suspend(void)
347{
348 int cpu;
349
350 for_each_possible_cpu(cpu) {
351 mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
352 per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
353 mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
354 }
355
356 return NOTIFY_DONE;
357}
358
359static int msm_cpufreq_resume(void)
360{
361 int cpu;
362
363 for_each_possible_cpu(cpu) {
364 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
365 }
366
367 return NOTIFY_DONE;
368}
369
370static int msm_cpufreq_pm_event(struct notifier_block *this,
371 unsigned long event, void *ptr)
372{
373 switch (event) {
374 case PM_POST_HIBERNATION:
375 case PM_POST_SUSPEND:
376 return msm_cpufreq_resume();
377 case PM_HIBERNATION_PREPARE:
378 case PM_SUSPEND_PREPARE:
379 return msm_cpufreq_suspend();
380 default:
381 return NOTIFY_DONE;
382 }
383}
384
/* sysfs attributes exported by the driver (available frequencies). */
static struct freq_attr *msm_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
389
/* cpufreq driver operations for the MSM platform. */
static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
	.init = msm_cpufreq_init,
	.verify = msm_cpufreq_verify,
	.target = msm_cpufreq_target,
	.get = msm_cpufreq_get_freq,
	.name = "msm",
	.attr = msm_freq_attr,
};
400
/* Forwards suspend/hibernate transitions to msm_cpufreq_pm_event(). */
static struct notifier_block msm_cpufreq_pm_notifier = {
	.notifier_call = msm_cpufreq_pm_event,
};
404
405static int __init msm_cpufreq_register(void)
406{
407 int cpu;
408
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409 for_each_possible_cpu(cpu) {
410 mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
411 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
412 }
413
414#ifdef CONFIG_SMP
415 msm_cpufreq_wq = create_workqueue("msm-cpufreq");
Narayanan Gopalakrishnan4f5e7132012-07-17 16:07:50 -0700416 register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700417#endif
418
419 register_pm_notifier(&msm_cpufreq_pm_notifier);
420 return cpufreq_register_driver(&msm_cpufreq_driver);
421}
422
423late_initcall(msm_cpufreq_register);