/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed on a load burst (default: policy max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

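/*
 * These tunables are exported through sysfs once the governor starts
 * (see interactive_attr_group below), typically under
 * /sys/devices/system/cpu/cpufreq/interactive/.  Illustrative settings
 * (assumed example values, not recommendations):
 *
 *   echo 1026000 > .../hispeed_freq        # kHz
 *   echo 90      > .../go_hispeed_load     # percent busy
 *   echo 40000   > .../min_sample_time     # usec
 *   echo 20000   > .../timer_rate          # usec
 *   echo 20000   > .../above_hispeed_delay # usec
 */
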
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time -
                                    pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

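        /*
         * Illustrative arithmetic (assumed sample values): with
         * timer_rate = 20000 usec, a CPU that was idle for 5000 usec of
         * the last sample gives cpu_load = 100 * (20000 - 5000) / 20000
         * = 75, which stays below the default go_hispeed_load of 85 and
         * so does not by itself trigger the jump to hispeed_freq below.
         */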
        if (cpu_load >= go_hispeed_load || boost_val) {
                if (pcpu->target_freq < hispeed_freq &&
                    hispeed_freq < pcpu->policy->max) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

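                        /*
                         * Illustrative: with the default
                         * above_hispeed_delay_val of 20000 usec (one
                         * timer_rate interval), a request to go above
                         * hispeed_freq is deferred, and the timer
                         * re-armed, until at least 20 ms have passed
                         * since speed was last validated at or below
                         * hispeed_freq.
                         */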
                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            pcpu->timer_run_time - pcpu->hispeed_validate_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = hispeed_freq * cpu_load / 100;
        }

        if (new_freq <= hispeed_freq)
                pcpu->hispeed_validate_time = pcpu->timer_run_time;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
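        /*
         * Illustrative: with the default min_sample_time of 80000 usec,
         * once floor_freq has been (re)validated (e.g. by a boost), any
         * lower target computed within the next 80 ms is skipped and the
         * timer simply re-armed.
         */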
        if (new_freq < pcpu->floor_freq) {
                if (pcpu->timer_run_time - pcpu->floor_validate_time
                    < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                                         pcpu->target_freq,
                                                         new_freq);
                        goto rearm;
                }
        }

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = pcpu->timer_run_time;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = pcpu->timer_run_time;

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                           pcpu->target_freq,
                                                           pcpu->policy->cur);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Refresh the floor frequency and the time at which it was
                 * last validated, so speed does not drop below hispeed_freq
                 * for at least min_sample_time.
                 */
                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

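/*
 * Illustrative usage from userspace (paths assume the sysfs group created
 * below under the global cpufreq kobject; values are examples only):
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boost
 *       Hold speed at or above hispeed_freq until "echo 0" clears it.
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *       One-shot boost to hispeed_freq; speed may ramp back down once
 *       min_sample_time has elapsed and load no longer warrants it.
 */
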
static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a
                         * timer that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");