/* arch/arm/mach-msm/cpufreq.c
 *
 * MSM architecture cpufreq driver
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 * Author: Mike A. Chan <mikechan@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/earlysuspend.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <mach/socinfo.h>
#include <mach/cpufreq.h>

#include "acpuclock.h"
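/*
 * On SMP targets a frequency change is carried out by a per-CPU work
 * item so that the switch runs on the CPU being scaled; the caller
 * waits on 'complete' and picks up the result from 'status'.
 */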
#ifdef CONFIG_SMP
struct cpufreq_work_struct {
        struct work_struct work;
        struct cpufreq_policy *policy;
        struct completion complete;
        int frequency;
        int status;
};

static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
static struct workqueue_struct *msm_cpufreq_wq;
#endif

struct cpufreq_suspend_t {
        struct mutex suspend_mutex;
        int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);
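/*
 * Per-CPU frequency limits: 'min'/'max' are the bounds found in the
 * cpufreq table, 'allowed_min'/'allowed_max' are the limits currently
 * enforced via msm_cpufreq_set_freq_limits().
 */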
struct cpu_freq {
        uint32_t max;
        uint32_t min;
        uint32_t allowed_max;
        uint32_t allowed_min;
        uint32_t limits_init;
};

static DEFINE_PER_CPU(struct cpu_freq, cpu_freq_info);
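/*
 * Clamp the requested frequency to the allowed per-CPU limits, issue
 * the PRECHANGE notification, switch the CPU clock via
 * acpuclk_set_rate() and send POSTCHANGE on success.
 */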
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
        int ret = 0;
        struct cpufreq_freqs freqs;
        struct cpu_freq *limit = &per_cpu(cpu_freq_info, policy->cpu);

        if (limit->limits_init) {
                if (new_freq > limit->allowed_max) {
                        new_freq = limit->allowed_max;
                        pr_debug("max: limiting freq to %d\n", new_freq);
                }

                if (new_freq < limit->allowed_min) {
                        new_freq = limit->allowed_min;
                        pr_debug("min: limiting freq to %d\n", new_freq);
                }
        }

        freqs.old = policy->cur;
        freqs.new = new_freq;
        freqs.cpu = policy->cpu;
        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ);
        if (!ret)
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        return ret;
}
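/* Work handler: performs the frequency switch on the target CPU. */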
#ifdef CONFIG_SMP
static void set_cpu_work(struct work_struct *work)
{
        struct cpufreq_work_struct *cpu_work =
                container_of(work, struct cpufreq_work_struct, work);

        cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
        complete(&cpu_work->complete);
}
#endif
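/*
 * cpufreq 'target' callback. Frequency changes are refused while the
 * device is suspended. On SMP, unless the caller is already affine to
 * only policy->cpu, the request is queued on that CPU's workqueue and
 * the caller blocks until the switch completes.
 */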
static int msm_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq,
                                unsigned int relation)
{
        int ret = -EFAULT;
        int index;
        struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
        struct cpufreq_work_struct *cpu_work = NULL;
        cpumask_var_t mask;

        if (!cpu_active(policy->cpu)) {
                pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
                return -ENODEV;
        }

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
#endif

        mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);

        if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
                pr_debug("cpufreq: cpu%d scheduling frequency change "
                        "in suspend.\n", policy->cpu);
                ret = -EFAULT;
                goto done;
        }

        table = cpufreq_frequency_get_table(policy->cpu);
        if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
                        &index)) {
                pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
                ret = -EINVAL;
                goto done;
        }

#ifdef CONFIG_CPU_FREQ_DEBUG
        pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
                policy->cpu, target_freq, relation,
                policy->min, policy->max, table[index].frequency);
#endif

#ifdef CONFIG_SMP
        cpu_work = &per_cpu(cpufreq_work, policy->cpu);
        cpu_work->policy = policy;
        cpu_work->frequency = table[index].frequency;
        cpu_work->status = -ENODEV;

        cpumask_clear(mask);
        cpumask_set_cpu(policy->cpu, mask);
        if (cpumask_equal(mask, &current->cpus_allowed)) {
                ret = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
                goto done;
        } else {
                cancel_work_sync(&cpu_work->work);
                INIT_COMPLETION(cpu_work->complete);
                queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
                wait_for_completion(&cpu_work->complete);
        }

        ret = cpu_work->status;
#else
        ret = set_cpu_freq(policy, table[index].frequency);
#endif

done:
#ifdef CONFIG_SMP
        free_cpumask_var(mask);
#endif
        mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
        return ret;
}

static int msm_cpufreq_verify(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                        policy->cpuinfo.max_freq);
        return 0;
}

static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
{
        return acpuclk_get_rate(cpu);
}
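/*
 * Walk each CPU's frequency table to find the lowest and highest
 * supported frequencies and use them to seed the per-CPU limits.
 */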
static inline int msm_cpufreq_limits_init(void)
{
        int cpu = 0;
        int i = 0;
        struct cpufreq_frequency_table *table = NULL;
        uint32_t min = (uint32_t) -1;
        uint32_t max = 0;
        struct cpu_freq *limit = NULL;

        for_each_possible_cpu(cpu) {
                limit = &per_cpu(cpu_freq_info, cpu);
                table = cpufreq_frequency_get_table(cpu);
                if (table == NULL) {
                        pr_err("%s: error reading cpufreq table for cpu %d\n",
                                        __func__, cpu);
                        continue;
                }
                for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
                        if (table[i].frequency > max)
                                max = table[i].frequency;
                        if (table[i].frequency < min)
                                min = table[i].frequency;
                }
                limit->allowed_min = min;
                limit->allowed_max = max;
                limit->min = min;
                limit->max = max;
                limit->limits_init = 1;
        }

        return 0;
}
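/*
 * Set the allowed frequency range for a CPU. Passing
 * MSM_CPUFREQ_NO_LIMIT (or a value outside the table bounds) restores
 * the corresponding table limit.
 */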
int msm_cpufreq_set_freq_limits(uint32_t cpu, uint32_t min, uint32_t max)
{
        struct cpu_freq *limit = &per_cpu(cpu_freq_info, cpu);

        if (!limit->limits_init)
                msm_cpufreq_limits_init();

        if ((min != MSM_CPUFREQ_NO_LIMIT) &&
                min >= limit->min && min <= limit->max)
                limit->allowed_min = min;
        else
                limit->allowed_min = limit->min;

        if ((max != MSM_CPUFREQ_NO_LIMIT) &&
                max <= limit->max && max >= limit->min)
                limit->allowed_max = max;
        else
                limit->allowed_max = limit->max;

        pr_debug("%s: Limiting cpu %d min = %d, max = %d\n",
                        __func__, cpu,
                        limit->allowed_min, limit->allowed_max);

        return 0;
}
EXPORT_SYMBOL(msm_cpufreq_set_freq_limits);
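/*
 * cpufreq 'init' callback: publish the frequency table, move the CPU to
 * a frequency present in the table if necessary, and report the clock
 * switch latency.
 */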
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
        int cur_freq;
        int index;
        struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
        struct cpufreq_work_struct *cpu_work = NULL;
#endif

        table = cpufreq_frequency_get_table(policy->cpu);
        if (table == NULL)
                return -ENODEV;
        /*
         * On MSM8625 the CPU cores cannot change frequency
         * independently; both always run at the same frequency,
         * so bind the policy to every CPU.
         */
        if (cpu_is_msm8625())
                cpumask_setall(policy->cpus);

        if (cpufreq_frequency_table_cpuinfo(policy, table)) {
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
                policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
                policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
#endif
        }
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
        policy->min = CONFIG_MSM_CPU_FREQ_MIN;
        policy->max = CONFIG_MSM_CPU_FREQ_MAX;
#endif

        cur_freq = acpuclk_get_rate(policy->cpu);
        if (cpufreq_frequency_table_target(policy, table, cur_freq,
                        CPUFREQ_RELATION_H, &index) &&
            cpufreq_frequency_table_target(policy, table, cur_freq,
                        CPUFREQ_RELATION_L, &index)) {
                pr_info("cpufreq: cpu%d at invalid freq: %d\n",
                                policy->cpu, cur_freq);
                return -EINVAL;
        }

        if (cur_freq != table[index].frequency) {
                int ret = 0;
                ret = acpuclk_set_rate(policy->cpu, table[index].frequency,
                                SETRATE_CPUFREQ);
                if (ret)
                        return ret;
                pr_info("cpufreq: cpu%d init at %d switching to %d\n",
                                policy->cpu, cur_freq, table[index].frequency);
                cur_freq = table[index].frequency;
        }

        policy->cur = cur_freq;

        policy->cpuinfo.transition_latency =
                acpuclk_get_switch_time() * NSEC_PER_USEC;
#ifdef CONFIG_SMP
        cpu_work = &per_cpu(cpufreq_work, policy->cpu);
        INIT_WORK(&cpu_work->work, set_cpu_work);
        init_completion(&cpu_work->complete);
#endif

        return 0;
}
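/*
 * Block frequency changes across suspend/resume: msm_cpufreq_target()
 * checks 'device_suspended' under the same per-CPU mutex.
 */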
static int msm_cpufreq_suspend(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
                per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
                mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
        }

        return NOTIFY_DONE;
}

static int msm_cpufreq_resume(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
        }

        return NOTIFY_DONE;
}
static int msm_cpufreq_pm_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
{
        switch (event) {
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                return msm_cpufreq_resume();
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                return msm_cpufreq_suspend();
        default:
                return NOTIFY_DONE;
        }
}

static struct freq_attr *msm_freq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

static struct cpufreq_driver msm_cpufreq_driver = {
        /* lps calculations are handled here. */
        .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
        .init = msm_cpufreq_init,
        .verify = msm_cpufreq_verify,
        .target = msm_cpufreq_target,
        .get = msm_cpufreq_get_freq,
        .name = "msm",
        .attr = msm_freq_attr,
};

static struct notifier_block msm_cpufreq_pm_notifier = {
        .notifier_call = msm_cpufreq_pm_event,
};
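/*
 * Driver registration: initialise per-CPU suspend state, create the SMP
 * workqueue and hook the PM notifier before registering with cpufreq.
 */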
static int __init msm_cpufreq_register(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
                per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
        }

#ifdef CONFIG_SMP
        msm_cpufreq_wq = create_workqueue("msm-cpufreq");
#endif

        register_pm_notifier(&msm_cpufreq_pm_notifier);
        return cpufreq_register_driver(&msm_cpufreq_driver);
}

late_initcall(msm_cpufreq_register);