blob: f270335653cdf9799f08b71f881400203565d6ce [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/cpufreq.c
2 *
3 * MSM architecture cpufreq driver
4 *
5 * Copyright (C) 2007 Google, Inc.
Duy Truonge833aca2013-02-12 13:35:08 -08006 * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007 * Author: Mike A. Chan <mikechan@google.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/earlysuspend.h>
21#include <linux/init.h>
Praveen Chidambaram696a5612012-05-25 17:29:11 -060022#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070023#include <linux/cpufreq.h>
24#include <linux/workqueue.h>
25#include <linux/completion.h>
26#include <linux/cpu.h>
27#include <linux/cpumask.h>
28#include <linux/sched.h>
29#include <linux/suspend.h>
Stepan Moskovchenkoaf25dd92011-08-05 18:12:48 -070030#include <mach/socinfo.h>
Praveen Chidambaram696a5612012-05-25 17:29:11 -060031#include <mach/cpufreq.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33#include "acpuclock.h"
34
/*
 * Per-CPU request handed to the cpufreq worker: the policy and target
 * frequency to apply, a completion the requester waits on, and the
 * status returned by set_cpu_freq().
 */
struct cpufreq_work_struct {
	struct work_struct work;
	struct cpufreq_policy *policy;
	struct completion complete;
	int frequency;
	int status;
};

static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
/* Workqueue used to run set_cpu_work() on the CPU being scaled */
static struct workqueue_struct *msm_cpufreq_wq;
45
/*
 * Per-CPU suspend bookkeeping: device_suspended blocks frequency
 * changes while the CPU is suspended or going offline; suspend_mutex
 * serializes that flag against in-flight frequency transitions.
 */
struct cpufreq_suspend_t {
	struct mutex suspend_mutex;
	int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);
52
/*
 * Per-CPU frequency bounds. min/max hold the full range found in the
 * cpufreq table; allowed_min/allowed_max are the clamps currently
 * enforced by set_cpu_freq() (adjustable via
 * msm_cpufreq_set_freq_limits()). limits_init is nonzero once the
 * table has been scanned.
 */
struct cpu_freq {
	uint32_t max;
	uint32_t min;
	uint32_t allowed_max;
	uint32_t allowed_min;
	uint32_t limits_init;
};

static DEFINE_PER_CPU(struct cpu_freq, cpu_freq_info);
62
/*
 * Change the clock rate of policy->cpu to new_freq, clamping the
 * request into the per-CPU allowed_min/allowed_max window when limits
 * are initialized. Issues the cpufreq PRE/POSTCHANGE notifications
 * around the rate switch (POSTCHANGE only on success). Returns 0 on
 * success or the acpuclk_set_rate() error. Callers in this file invoke
 * it either directly when affine to the target CPU or via the per-CPU
 * worker queued on that CPU.
 */
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
	int ret = 0;
	int saved_sched_policy = -EINVAL;
	int saved_sched_rt_prio = -EINVAL;
	struct cpufreq_freqs freqs;
	struct cpu_freq *limit = &per_cpu(cpu_freq_info, policy->cpu);
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Clamp the request into the currently allowed window */
	if (limit->limits_init) {
		if (new_freq > limit->allowed_max) {
			new_freq = limit->allowed_max;
			pr_debug("max: limiting freq to %d\n", new_freq);
		}

		if (new_freq < limit->allowed_min) {
			new_freq = limit->allowed_min;
			pr_debug("min: limiting freq to %d\n", new_freq);
		}
	}

	freqs.old = policy->cur;
	freqs.new = new_freq;
	freqs.cpu = policy->cpu;

	/*
	 * Put the caller into SCHED_FIFO priority to avoid cpu starvation
	 * in the acpuclk_set_rate path while increasing frequencies
	 */

	if (freqs.new > freqs.old && current->policy != SCHED_FIFO) {
		saved_sched_policy = current->policy;
		saved_sched_rt_prio = current->rt_priority;
		sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	}

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ);
	if (!ret)
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	/* Restore priority after clock ramp-up */
	if (freqs.new > freqs.old && saved_sched_policy >= 0) {
		param.sched_priority = saved_sched_rt_prio;
		sched_setscheduler_nocheck(current, saved_sched_policy, &param);
	}
	return ret;
}
112
Praveen Chidambaram241ded32013-03-11 14:50:06 -0600113static void set_cpu_work(struct work_struct *work)
114{
115 struct cpufreq_work_struct *cpu_work =
116 container_of(work, struct cpufreq_work_struct, work);
117
118 cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
119 complete(&cpu_work->complete);
120}
121
/*
 * cpufreq core callback: move policy->cpu to target_freq.
 *
 * The rate change is performed either directly (when the caller is
 * already affine to exactly the target CPU) or by queueing work on
 * that CPU's worker and blocking on its completion. Returns 0 on
 * success, -EFAULT while the CPU is suspended, -ENODEV if the CPU is
 * inactive, -EINVAL for a frequency not in the table, or -ENOMEM if
 * the cpumask allocation fails.
 */
static int msm_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	int ret = -EFAULT;
	int index;
	struct cpufreq_frequency_table *table;

	struct cpufreq_work_struct *cpu_work = NULL;
	cpumask_var_t mask;

	if (!cpu_active(policy->cpu)) {
		pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
		return -ENODEV;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);

	/* Frequency changes are rejected while the CPU is suspended */
	if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
		pr_debug("cpufreq: cpu%d scheduling frequency change "
				"in suspend.\n", policy->cpu);
		ret = -EFAULT;
		goto done;
	}

	table = cpufreq_frequency_get_table(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
			&index)) {
		pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
		ret = -EINVAL;
		goto done;
	}

	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
		policy->cpu, target_freq, relation,
		policy->min, policy->max, table[index].frequency);

	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	cpu_work->policy = policy;
	cpu_work->frequency = table[index].frequency;
	cpu_work->status = -ENODEV;

	/*
	 * If current is pinned to exactly the target CPU, change the rate
	 * in this context; otherwise hand off to the per-CPU worker so the
	 * change runs on that CPU, and wait for it to finish.
	 */
	cpumask_clear(mask);
	cpumask_set_cpu(policy->cpu, mask);
	if (cpumask_equal(mask, &current->cpus_allowed)) {
		ret = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
		goto done;
	} else {
		cancel_work_sync(&cpu_work->work);
		INIT_COMPLETION(cpu_work->complete);
		queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
		wait_for_completion(&cpu_work->complete);
	}

	ret = cpu_work->status;

done:
	free_cpumask_var(mask);
	mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
	return ret;
}
186
/* cpufreq core callback: clamp policy->min/max into the cpuinfo bounds. */
static int msm_cpufreq_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
			policy->cpuinfo.max_freq);
	return 0;
}
193
/* cpufreq core callback: report the current clock rate of @cpu. */
static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
{
	return acpuclk_get_rate(cpu);
}
198
199static inline int msm_cpufreq_limits_init(void)
200{
201 int cpu = 0;
202 int i = 0;
203 struct cpufreq_frequency_table *table = NULL;
204 uint32_t min = (uint32_t) -1;
205 uint32_t max = 0;
206 struct cpu_freq *limit = NULL;
207
208 for_each_possible_cpu(cpu) {
209 limit = &per_cpu(cpu_freq_info, cpu);
210 table = cpufreq_frequency_get_table(cpu);
211 if (table == NULL) {
212 pr_err("%s: error reading cpufreq table for cpu %d\n",
213 __func__, cpu);
214 continue;
215 }
216 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
217 if (table[i].frequency > max)
218 max = table[i].frequency;
219 if (table[i].frequency < min)
220 min = table[i].frequency;
221 }
222 limit->allowed_min = min;
223 limit->allowed_max = max;
224 limit->min = min;
225 limit->max = max;
226 limit->limits_init = 1;
227 }
228
229 return 0;
230}
231
232int msm_cpufreq_set_freq_limits(uint32_t cpu, uint32_t min, uint32_t max)
233{
234 struct cpu_freq *limit = &per_cpu(cpu_freq_info, cpu);
235
236 if (!limit->limits_init)
237 msm_cpufreq_limits_init();
238
239 if ((min != MSM_CPUFREQ_NO_LIMIT) &&
240 min >= limit->min && min <= limit->max)
241 limit->allowed_min = min;
242 else
243 limit->allowed_min = limit->min;
244
245
246 if ((max != MSM_CPUFREQ_NO_LIMIT) &&
247 max <= limit->max && max >= limit->min)
248 limit->allowed_max = max;
249 else
250 limit->allowed_max = limit->max;
251
252 pr_debug("%s: Limiting cpu %d min = %d, max = %d\n",
253 __func__, cpu,
254 limit->allowed_min, limit->allowed_max);
255
256 return 0;
257}
258EXPORT_SYMBOL(msm_cpufreq_set_freq_limits);
259
/*
 * cpufreq core callback: initialize the policy for a CPU. Fills
 * cpuinfo from the frequency table, snaps the hardware clock to a
 * valid table entry so policy->cur is consistent, and prepares the
 * per-CPU worker used by msm_cpufreq_target(). Returns 0 or a
 * negative errno.
 */
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
	int cur_freq;
	int index;
	struct cpufreq_frequency_table *table;
	struct cpufreq_work_struct *cpu_work = NULL;

	table = cpufreq_frequency_get_table(policy->cpu);
	if (table == NULL)
		return -ENODEV;
	/*
	 * In 8625 both cpu core's frequency can not
	 * be changed independently. Each cpu is bound to
	 * same frequency. Hence set the cpumask to all cpu.
	 */
	if (cpu_is_msm8625())
		cpumask_setall(policy->cpus);

	if (cpufreq_frequency_table_cpuinfo(policy, table)) {
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
		/* Table rejected by the core: fall back to build-time bounds */
		policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
		policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
#endif
	}
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
	policy->min = CONFIG_MSM_CPU_FREQ_MIN;
	policy->max = CONFIG_MSM_CPU_FREQ_MAX;
#endif

	cur_freq = acpuclk_get_rate(policy->cpu);
	/* Locate the table entry for the current rate: try rounding up, then down */
	if (cpufreq_frequency_table_target(policy, table, cur_freq,
	    CPUFREQ_RELATION_H, &index) &&
	    cpufreq_frequency_table_target(policy, table, cur_freq,
	    CPUFREQ_RELATION_L, &index)) {
		pr_info("cpufreq: cpu%d at invalid freq: %d\n",
				policy->cpu, cur_freq);
		return -EINVAL;
	}

	/* If the hardware rate is off-table, switch to the matched entry */
	if (cur_freq != table[index].frequency) {
		int ret = 0;
		ret = acpuclk_set_rate(policy->cpu, table[index].frequency,
				SETRATE_CPUFREQ);
		if (ret)
			return ret;
		pr_info("cpufreq: cpu%d init at %d switching to %d\n",
				policy->cpu, cur_freq, table[index].frequency);
		cur_freq = table[index].frequency;
	}

	policy->cur = cur_freq;

	policy->cpuinfo.transition_latency =
		acpuclk_get_switch_time() * NSEC_PER_USEC;

	/* Set up the worker used for cross-CPU frequency changes */
	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	INIT_WORK(&cpu_work->work, set_cpu_work);
	init_completion(&cpu_work->complete);

	return 0;
}
321
Praveen Chidambaramaa75efa2012-09-18 13:42:40 -0600322static int __cpuinit msm_cpufreq_cpu_callback(struct notifier_block *nfb,
323 unsigned long action, void *hcpu)
324{
325 unsigned int cpu = (unsigned long)hcpu;
326
327 switch (action) {
328 case CPU_ONLINE:
329 case CPU_ONLINE_FROZEN:
330 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
331 break;
332 case CPU_DOWN_PREPARE:
333 case CPU_DOWN_PREPARE_FROZEN:
334 mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
335 per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
336 mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
337 break;
338 case CPU_DOWN_FAILED:
339 case CPU_DOWN_FAILED_FROZEN:
340 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
341 break;
342 }
343
344 return NOTIFY_OK;
345}
346
/* Hotplug notifier that gates frequency changes while CPUs go up/down */
static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
	.notifier_call = msm_cpufreq_cpu_callback,
};
350
/*
 * Define suspend/resume for cpufreq_driver. Kernel will call
 * these during suspend/resume with interrupts disabled. This
 * helps the suspend/resume variable get updated before the cpufreq
 * governor tries to change the frequency after coming out of suspend.
 */
static int msm_cpufreq_suspend(struct cpufreq_policy *policy)
{
	int cpu;

	/* Block frequency changes on every CPU until resume */
	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
	}

	return 0;
}
367
/* Resume counterpart of msm_cpufreq_suspend(): re-allow frequency changes. */
static int msm_cpufreq_resume(struct cpufreq_policy *policy)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
	}

	return 0;
}
378
/* sysfs attributes exported by this driver (scaling_available_frequencies) */
static struct freq_attr *msm_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
383
/* cpufreq driver operations registered with the cpufreq core */
static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
	.init = msm_cpufreq_init,
	.verify = msm_cpufreq_verify,
	.target = msm_cpufreq_target,
	.get = msm_cpufreq_get_freq,
	.suspend = msm_cpufreq_suspend,
	.resume = msm_cpufreq_resume,
	.name = "msm",
	.attr = msm_freq_attr,
};
396
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700397static int __init msm_cpufreq_register(void)
398{
399 int cpu;
400
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700401 for_each_possible_cpu(cpu) {
402 mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
403 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
404 }
405
Praveen Chidambaram241ded32013-03-11 14:50:06 -0600406 msm_cpufreq_wq = create_workqueue("msm-cpufreq");
Narayanan Gopalakrishnan4f5e7132012-07-17 16:07:50 -0700407 register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700408
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409 return cpufreq_register_driver(&msm_cpufreq_driver);
410}
411
412late_initcall(msm_cpufreq_register);