/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static int active_count;

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
        int cpu_load;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
        unsigned long flags;

        mod_timer_pinned(&pcpu->cpu_timer, expires);
        if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

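/*
 * target_loads is laid out as alternating load/frequency pairs:
 * {load0, freq1, load1, freq2, load2, ...}, sorted by frequency.
 * load0 applies below freq1, load1 from freq1 up to freq2, and so on.
 * For illustration (the values here are only an example), writing
 * "85 1000000:90" to the target_loads sysfs file targets 85% load
 * below 1000000 kHz and 90% at or above it.
 */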
static unsigned int freq_to_targetload(unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&target_loads_lock, flags);

        for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
                ;

        ret = target_loads[i];
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

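/*
 * Worked example (illustrative numbers): with a target load of 90 and
 * loadadjfreq of 90000000 (90% load at a current speed of 1000000 kHz),
 * choose_freq() converges on the lowest table frequency >=
 * 90000000 / 90 = 1000000 kHz, so the CPU stays at 1 GHz.
 */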
static unsigned int choose_freq(
        struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                cpufreq_frequency_table_target(
                        pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                        CPUFREQ_RELATION_L, &index);
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmax - 1, CPUFREQ_RELATION_H,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low. freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmin + 1, CPUFREQ_RELATION_L,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

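/*
 * Accumulate frequency-weighted active time since the last sample:
 * cputime_speedadj grows by (non-idle time) * (current frequency), so
 * dividing it by elapsed wall time later yields the average frequency
 * actually consumed, from which the timer derives the CPU load.
 */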
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time_us(cpu, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
        active_time = delta_time - delta_idle;
        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

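        /*
         * cputime_speedadj / delta_time is the average frequency sustained
         * over the sample; scaling by 100 and dividing by target_freq turns
         * that into a load percentage relative to the current target speed.
         */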
        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = boost_val || now < boostpulse_endtime;

        pcpu->cpu_load = cpu_load;

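        /*
         * Jump straight to hispeed_freq on a load burst or an active boost;
         * otherwise (or once already at/above hispeed_freq) let choose_freq()
         * pick the lowest speed that satisfies the target load.
         */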
        if (cpu_load >= go_hispeed_load || boosted) {
                if (pcpu->target_freq < hispeed_freq) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed. On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely. This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;

                                cpufreq_notify_utilization(pcpu->policy,
                                        (pcpu->cpu_load * pcpu->policy->cur) /
                                        pcpu->policy->cpuinfo.max_freq);
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

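/*
 * Bump every online CPU whose target is below hispeed_freq up to
 * hispeed_freq, reset the floor so the boost is held for min_sample_time,
 * and kick speedchange_task to apply the new speeds.
 */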
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

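/*
 * cpufreq transition notifier: on POSTCHANGE, snapshot the load on every
 * CPU in the policy so the interval just completed is credited at the
 * speed it actually ran, rather than smeared across the next sample.
 */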
static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&target_loads_lock, flags);

        for (i = 0; i < ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", target_loads[i],
                               i & 0x1 ? ":" : " ");

        ret += sprintf(buf + ret, "\n");
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        const char *cp;
        unsigned int *new_target_loads = NULL;
        int ntokens = 1;
        int i;
        unsigned long flags;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err_inval;

        new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!new_target_loads) {
                ret = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
                        goto err_inval;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_inval;

        spin_lock_irqsave(&target_loads_lock, flags);
        if (target_loads != default_target_loads)
                kfree(target_loads);
        target_loads = new_target_loads;
        ntarget_loads = ntokens;
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return count;

err_inval:
        ret = -EINVAL;
err:
        kfree(new_target_loads);
        return ret;
}

static struct global_attr target_loads_attr =
        __ATTR(target_loads, S_IRUGO | S_IWUSR,
                show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);


static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        timer_slack_val = val;
        return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

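/*
 * boostpulse is write-only: any write raises all CPUs to hispeed_freq
 * and lets the speed fall again once boostpulse_duration_val usecs have
 * elapsed, instead of holding the boost like the "boost" flag does.
 */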
static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_duration_val = val;
        return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
        &target_loads_attr.attr,
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &timer_slack.attr,
        &boost.attr,
        &boostpulse.attr,
        &boostpulse_duration.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

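/*
 * With this group registered on cpufreq_global_kobject, the tunables
 * appear under /sys/devices/system/cpu/cpufreq/interactive/ (path assumed
 * from the standard global cpufreq kobject); e.g., an illustrative
 * "echo 20000 > timer_rate" would select a 20 ms sample rate.
 */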
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                mutex_lock(&gov_lock);

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        unsigned long expires;

                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        expires = jiffies + usecs_to_jiffies(timer_rate);
                        pcpu->cpu_timer.expires = expires;
                        add_timer_on(&pcpu->cpu_timer, j);
                        if (timer_slack_val >= 0) {
                                expires += usecs_to_jiffies(timer_slack_val);
                                pcpu->cpu_slack_timer.expires = expires;
                                add_timer_on(&pcpu->cpu_slack_timer, j);
                        }
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (++active_count > 1) {
                        mutex_unlock(&gov_lock);
                        return 0;
                }

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc) {
                        mutex_unlock(&gov_lock);
                        return rc;
                }

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                cpufreq_register_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                if (--active_count > 0) {
                        mutex_unlock(&gov_lock);
                        return 0;
                }

                cpufreq_unregister_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                mutex_unlock(&gov_lock);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

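/*
 * The slack timer body is intentionally empty: its only job is to wake
 * the CPU from idle so the idle-exit path re-samples load and can ramp
 * the speed back down.
 */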
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&target_loads_lock);
        spin_lock_init(&speedchange_cpumask_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");