/* arch/arm/mach-msm/cpufreq.c
 *
 * MSM architecture cpufreq driver
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
 * Author: Mike A. Chan <mikechan@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/earlysuspend.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/suspend.h>

#include "acpuclock.h"

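/*
 * On SMP targets the frequency switch is carried out on the CPU being
 * scaled: each CPU gets a work item plus a completion, and requests that
 * arrive from another CPU are queued to msm_cpufreq_wq bound to the target
 * CPU (see msm_cpufreq_target() below).
 */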
#ifdef CONFIG_SMP
struct cpufreq_work_struct {
	struct work_struct work;
	struct cpufreq_policy *policy;
	struct completion complete;
	int frequency;
	int status;
};

static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
static struct workqueue_struct *msm_cpufreq_wq;
#endif

struct cpufreq_suspend_t {
	struct mutex suspend_mutex;
	int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);

static int override_cpu;

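/*
 * Perform the actual frequency switch for one CPU and send the standard
 * cpufreq PRE/POSTCHANGE notifications.  When the "mfreq" override is set,
 * the request is clamped up to policy->max.
 */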
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
	int ret = 0;
	struct cpufreq_freqs freqs;

	freqs.old = policy->cur;
	if (override_cpu) {
		if (policy->cur == policy->max)
			return 0;
		else
			freqs.new = policy->max;
	} else
		freqs.new = new_freq;
	freqs.cpu = policy->cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	ret = acpuclk_set_rate(policy->cpu, freqs.new, SETRATE_CPUFREQ);
	if (!ret)
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return ret;
}

#ifdef CONFIG_SMP
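/*
 * Work handler run on the target CPU's workqueue; performs the switch and
 * signals the waiter in msm_cpufreq_target().
 */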
static void set_cpu_work(struct work_struct *work)
{
	struct cpufreq_work_struct *cpu_work =
		container_of(work, struct cpufreq_work_struct, work);

	cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
	complete(&cpu_work->complete);
}
#endif

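/*
 * cpufreq ->target hook.  Frequency changes are refused while the device
 * is suspended.  On SMP, if the caller is not already bound to policy->cpu,
 * the switch is handed off to that CPU's work item and we wait for it to
 * complete, so acpuclk_set_rate() always runs on the CPU being scaled.
 */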
static int msm_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	int ret = -EFAULT;
	int index;
	struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
	struct cpufreq_work_struct *cpu_work = NULL;
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	if (!cpu_active(policy->cpu)) {
		pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
		free_cpumask_var(mask);
		return -ENODEV;
	}
#endif

	mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);

	if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
		pr_debug("cpufreq: cpu%d scheduling frequency change "
				"in suspend.\n", policy->cpu);
		ret = -EFAULT;
		goto done;
	}

	table = cpufreq_frequency_get_table(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
			&index)) {
		pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
		ret = -EINVAL;
		goto done;
	}

#ifdef CONFIG_CPU_FREQ_DEBUG
	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
		policy->cpu, target_freq, relation,
		policy->min, policy->max, table[index].frequency);
#endif

#ifdef CONFIG_SMP
	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	cpu_work->policy = policy;
	cpu_work->frequency = table[index].frequency;
	cpu_work->status = -ENODEV;

	cpumask_clear(mask);
	cpumask_set_cpu(policy->cpu, mask);
	if (cpumask_equal(mask, &current->cpus_allowed)) {
		ret = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
		goto done;
	} else {
		cancel_work_sync(&cpu_work->work);
		INIT_COMPLETION(cpu_work->complete);
		queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
		wait_for_completion(&cpu_work->complete);
	}

	ret = cpu_work->status;
#else
	ret = set_cpu_freq(policy, table[index].frequency);
#endif

done:
#ifdef CONFIG_SMP
	free_cpumask_var(mask);
#endif
	mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
	return ret;
}

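/* cpufreq ->verify hook: clamp the requested policy to the hardware limits. */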
static int msm_cpufreq_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
			policy->cpuinfo.max_freq);
	return 0;
}

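/*
 * cpufreq ->init hook.  Publishes the acpuclock frequency table, snaps the
 * current rate to the nearest table entry if needed, and sets up the
 * per-CPU work item used on SMP.
 */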
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
	int cur_freq;
	int index;
	struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
	struct cpufreq_work_struct *cpu_work = NULL;
#endif

	table = cpufreq_frequency_get_table(policy->cpu);
	if (cpufreq_frequency_table_cpuinfo(policy, table)) {
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
		policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
		policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
#endif
	}
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
	policy->min = CONFIG_MSM_CPU_FREQ_MIN;
	policy->max = CONFIG_MSM_CPU_FREQ_MAX;
#endif

	cur_freq = acpuclk_get_rate(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, cur_freq,
			CPUFREQ_RELATION_H, &index)) {
		pr_info("cpufreq: cpu%d at invalid freq: %d\n",
				policy->cpu, cur_freq);
		return -EINVAL;
	}

	if (cur_freq != table[index].frequency) {
		int ret = 0;
		ret = acpuclk_set_rate(policy->cpu, table[index].frequency,
				SETRATE_CPUFREQ);
		if (ret)
			return ret;
		pr_info("cpufreq: cpu%d init at %d switching to %d\n",
				policy->cpu, cur_freq, table[index].frequency);
		cur_freq = table[index].frequency;
	}

	policy->cur = cur_freq;

	policy->cpuinfo.transition_latency =
		acpuclk_get_switch_time() * NSEC_PER_USEC;
#ifdef CONFIG_SMP
	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	INIT_WORK(&cpu_work->work, set_cpu_work);
	init_completion(&cpu_work->complete);
#endif

	return 0;
}

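/*
 * PM notifier callbacks: block frequency changes on every CPU while a
 * suspend or hibernation transition is in progress and release them again
 * afterwards (dispatched from msm_cpufreq_pm_event() below).
 */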
static int msm_cpufreq_suspend(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
		per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
		mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
	}

	return NOTIFY_DONE;
}

static int msm_cpufreq_resume(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
	}

	return NOTIFY_DONE;
}

static int msm_cpufreq_pm_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	switch (event) {
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		return msm_cpufreq_resume();
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		return msm_cpufreq_suspend();
	default:
		return NOTIFY_DONE;
	}
}

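/*
 * "mfreq" sysfs knob: any non-zero value makes set_cpu_freq() pin the CPU
 * at policy->max until it is cleared.  The attribute is created on the cpu
 * sysdev class, so it typically appears as /sys/devices/system/cpu/mfreq
 * (exact path may vary by kernel version).  Example usage from a root shell:
 *
 *	echo 1 > /sys/devices/system/cpu/mfreq	# force max frequency
 *	echo 0 > /sys/devices/system/cpu/mfreq	# back to normal scaling
 */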
static ssize_t store_mfreq(struct sysdev_class *class,
			struct sysdev_class_attribute *attr,
			const char *buf, size_t count)
{
	u64 val;

	if (strict_strtoull(buf, 0, &val) < 0) {
		pr_err("Invalid parameter to mfreq\n");
		return 0;
	}
	if (val)
		override_cpu = 1;
	else
		override_cpu = 0;
	return count;
}

static SYSDEV_CLASS_ATTR(mfreq, 0200, NULL, store_mfreq);

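/*
 * Callbacks registered with the cpufreq core.  CPUFREQ_CONST_LOOPS tells
 * the core that loops_per_jiffy does not change with CPU frequency on this
 * target.
 */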
static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
	.init = msm_cpufreq_init,
	.verify = msm_cpufreq_verify,
	.target = msm_cpufreq_target,
	.name = "msm",
};

static struct notifier_block msm_cpufreq_pm_notifier = {
	.notifier_call = msm_cpufreq_pm_event,
};

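/*
 * Driver setup: create the mfreq sysfs attribute, initialise the per-CPU
 * suspend bookkeeping, create the SMP workqueue, hook the PM notifier and
 * register the driver with the cpufreq core.
 */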
static int __init msm_cpufreq_register(void)
{
	int cpu;

	int err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
			&attr_mfreq.attr);
	if (err)
		pr_err("Failed to create sysfs mfreq\n");

	for_each_possible_cpu(cpu) {
		mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
		per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
	}

#ifdef CONFIG_SMP
	msm_cpufreq_wq = create_workqueue("msm-cpufreq");
#endif

	register_pm_notifier(&msm_cpufreq_pm_notifier);
	return cpufreq_register_driver(&msm_cpufreq_driver);
}

late_initcall(msm_cpufreq_register);