/* arch/arm/mach-msm/cpufreq.c
2 *
3 * MSM architecture cpufreq driver
4 *
5 * Copyright (C) 2007 Google, Inc.
6 * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
7 * Author: Mike A. Chan <mikechan@google.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/earlysuspend.h>
21#include <linux/init.h>
22#include <linux/cpufreq.h>
23#include <linux/workqueue.h>
24#include <linux/completion.h>
25#include <linux/cpu.h>
26#include <linux/cpumask.h>
27#include <linux/sched.h>
28#include <linux/suspend.h>
29
30#include "acpuclock.h"
31
#ifdef CONFIG_SMP
/*
 * Per-CPU request for a frequency change that must execute on the
 * target CPU itself: the requester fills in 'policy' and 'frequency',
 * queues 'work' on that CPU's workqueue, and blocks on 'complete'
 * until 'status' holds the result (see set_cpu_work()).
 */
struct cpufreq_work_struct {
	struct work_struct work;
	struct cpufreq_policy *policy;	/* policy of the CPU being scaled */
	struct completion complete;	/* signalled when the change is done */
	int frequency;			/* requested frequency from the cpufreq table */
	int status;			/* return value of set_cpu_freq() */
};

static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
/* Workqueue used to run set_cpu_work() pinned on the target CPU. */
static struct workqueue_struct *msm_cpufreq_wq;
#endif
44
/*
 * Per-CPU suspend gate: while 'device_suspended' is set,
 * msm_cpufreq_target() refuses to change the frequency.
 */
struct cpufreq_suspend_t {
	struct mutex suspend_mutex;	/* serializes target() vs. suspend/resume */
	int device_suspended;		/* nonzero between suspend and resume */
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);
51
52static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
53{
54 int ret = 0;
55 struct cpufreq_freqs freqs;
56
57 freqs.old = policy->cur;
58 freqs.new = new_freq;
59 freqs.cpu = policy->cpu;
60 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
61 ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ);
62 if (!ret)
63 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
64
65 return ret;
66}
67
68#ifdef CONFIG_SMP
69static void set_cpu_work(struct work_struct *work)
70{
71 struct cpufreq_work_struct *cpu_work =
72 container_of(work, struct cpufreq_work_struct, work);
73
74 cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
75 complete(&cpu_work->complete);
76}
77#endif
78
79static int msm_cpufreq_target(struct cpufreq_policy *policy,
80 unsigned int target_freq,
81 unsigned int relation)
82{
83 int ret = -EFAULT;
84 int index;
85 struct cpufreq_frequency_table *table;
86#ifdef CONFIG_SMP
87 struct cpufreq_work_struct *cpu_work = NULL;
88 cpumask_var_t mask;
89
90 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
91 return -ENOMEM;
92
93 if (!cpu_active(policy->cpu)) {
94 pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
95 return -ENODEV;
96 }
97#endif
98
99 mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
100
101 if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
102 pr_debug("cpufreq: cpu%d scheduling frequency change "
103 "in suspend.\n", policy->cpu);
104 ret = -EFAULT;
105 goto done;
106 }
107
108 table = cpufreq_frequency_get_table(policy->cpu);
109 if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
110 &index)) {
111 pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
112 ret = -EINVAL;
113 goto done;
114 }
115
116#ifdef CONFIG_CPU_FREQ_DEBUG
117 pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
118 policy->cpu, target_freq, relation,
119 policy->min, policy->max, table[index].frequency);
120#endif
121
122#ifdef CONFIG_SMP
123 cpu_work = &per_cpu(cpufreq_work, policy->cpu);
124 cpu_work->policy = policy;
125 cpu_work->frequency = table[index].frequency;
126 cpu_work->status = -ENODEV;
127
128 cpumask_clear(mask);
129 cpumask_set_cpu(policy->cpu, mask);
130 if (cpumask_equal(mask, &current->cpus_allowed)) {
131 ret = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
132 goto done;
133 } else {
134 cancel_work_sync(&cpu_work->work);
135 INIT_COMPLETION(cpu_work->complete);
136 queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
137 wait_for_completion(&cpu_work->complete);
138 }
139
140 free_cpumask_var(mask);
141 ret = cpu_work->status;
142#else
143 ret = set_cpu_freq(policy, table[index].frequency);
144#endif
145
146done:
147 mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
148 return ret;
149}
150
151static int msm_cpufreq_verify(struct cpufreq_policy *policy)
152{
153 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
154 policy->cpuinfo.max_freq);
155 return 0;
156}
157
/*
 * cpufreq ->init hook: populate one CPU's policy from the frequency
 * table and align the hardware clock with a table entry.
 *
 * If the current acpuclk rate is not an exact table entry, the CPU is
 * switched to the table frequency selected with CPUFREQ_RELATION_H so
 * that policy->cur always corresponds to a table row.
 *
 * Returns 0 on success, -EINVAL if the current rate cannot be mapped
 * onto the table, or the error from acpuclk_set_rate().
 */
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
	int cur_freq;
	int index;
	struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
	struct cpufreq_work_struct *cpu_work = NULL;
#endif

	table = cpufreq_frequency_get_table(policy->cpu);
	if (cpufreq_frequency_table_cpuinfo(policy, table)) {
		/* Table rejected: fall back to build-time limits if configured. */
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
		policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
		policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
#endif
	}
	/* Build-time policy bounds unconditionally override table-derived ones. */
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
	policy->min = CONFIG_MSM_CPU_FREQ_MIN;
	policy->max = CONFIG_MSM_CPU_FREQ_MAX;
#endif

	cur_freq = acpuclk_get_rate(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, cur_freq,
			CPUFREQ_RELATION_H, &index)) {
		pr_info("cpufreq: cpu%d at invalid freq: %d\n",
				policy->cpu, cur_freq);
		return -EINVAL;
	}

	/* Snap the clock to the chosen table entry if it differs. */
	if (cur_freq != table[index].frequency) {
		int ret = 0;
		ret = acpuclk_set_rate(policy->cpu, table[index].frequency,
				SETRATE_CPUFREQ);
		if (ret)
			return ret;
		pr_info("cpufreq: cpu%d init at %d switching to %d\n",
				policy->cpu, cur_freq, table[index].frequency);
		cur_freq = table[index].frequency;
	}

	policy->cur = cur_freq;

	/* NOTE(review): assumes acpuclk_get_switch_time() returns microseconds,
	 * since cpufreq expects transition_latency in ns — confirm in acpuclock. */
	policy->cpuinfo.transition_latency =
		acpuclk_get_switch_time() * NSEC_PER_USEC;
#ifdef CONFIG_SMP
	/* Prepare this CPU's work item for cross-CPU frequency requests. */
	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	INIT_WORK(&cpu_work->work, set_cpu_work);
	init_completion(&cpu_work->complete);
#endif

	return 0;
}
210
211static int msm_cpufreq_suspend(void)
212{
213 int cpu;
214
215 for_each_possible_cpu(cpu) {
216 mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
217 per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
218 mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
219 }
220
221 return NOTIFY_DONE;
222}
223
224static int msm_cpufreq_resume(void)
225{
226 int cpu;
227
228 for_each_possible_cpu(cpu) {
229 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
230 }
231
232 return NOTIFY_DONE;
233}
234
235static int msm_cpufreq_pm_event(struct notifier_block *this,
236 unsigned long event, void *ptr)
237{
238 switch (event) {
239 case PM_POST_HIBERNATION:
240 case PM_POST_SUSPEND:
241 return msm_cpufreq_resume();
242 case PM_HIBERNATION_PREPARE:
243 case PM_SUSPEND_PREPARE:
244 return msm_cpufreq_suspend();
245 default:
246 return NOTIFY_DONE;
247 }
248}
249
/* MSM cpufreq driver: CONST_LOOPS because loops_per_jiffy does not
 * scale with frequency on this platform; STICKY keeps the driver
 * registered even if no CPU initializes successfully. */
static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
	.init = msm_cpufreq_init,
	.verify = msm_cpufreq_verify,
	.target = msm_cpufreq_target,
	.name = "msm",
};
258
/* Registered in msm_cpufreq_register() to receive suspend/resume events. */
static struct notifier_block msm_cpufreq_pm_notifier = {
	.notifier_call = msm_cpufreq_pm_event,
};
262
263static int __init msm_cpufreq_register(void)
264{
265 int cpu;
266
267 for_each_possible_cpu(cpu) {
268 mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
269 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
270 }
271
272#ifdef CONFIG_SMP
273 msm_cpufreq_wq = create_workqueue("msm-cpufreq");
274#endif
275
276 register_pm_notifier(&msm_cpufreq_pm_notifier);
277 return cpufreq_register_driver(&msm_cpufreq_driver);
278}
279
280late_initcall(msm_cpufreq_register);
281