/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

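/* Per-CPU governor state, updated from that CPU's timer and the idle path. */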
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed on a load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to re-evaluate CPU speed.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means a longer-term speed boost is active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
}

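/*
 * Per-CPU timer: sample CPU load since the last re-arm and since the last
 * speed change, take the greater of the two, and pick a new target speed.
 * Requests above hispeed_freq are rate-limited by above_hispeed_delay_val;
 * drops below floor_freq are held off until min_sample_time has elapsed.
 */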
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	now_idle = get_cpu_idle_time_us(data, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
	delta_time = (unsigned int)(now - pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

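	/*
	 * Worked example: over a 20000us sample window with 5000us spent
	 * idle, cpu_load = 100 * (20000 - 5000) / 20000 = 75, which is
	 * below the default go_hispeed_load of 85, so this sample alone
	 * would not bump the CPU straight to hispeed_freq.
	 */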
	if (cpu_load >= go_hispeed_load || boost_val) {
		if (pcpu->target_freq < hispeed_freq &&
		    hispeed_freq < pcpu->policy->max) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;

			if (pcpu->target_freq == hispeed_freq &&
			    new_freq > hispeed_freq &&
			    now - pcpu->hispeed_validate_time
			    < above_hispeed_delay_val) {
				trace_cpufreq_interactive_notyet(
					data, cpu_load, pcpu->target_freq,
					pcpu->policy->cur, new_freq);
				goto rearm;
			}
		}
	} else {
		new_freq = hispeed_freq * cpu_load / 100;
	}

	if (new_freq <= hispeed_freq)
		pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = now;

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}

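/*
 * Idle hooks: on idle entry, keep the evaluation timer running on a CPU
 * that is above minimum speed so it cannot hold the other CPUs in its
 * policy at high speed indefinitely; on idle exit, re-arm (or immediately
 * run) the timer so load is re-evaluated promptly.
 */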
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

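/*
 * The speedchange task runs as a SCHED_FIFO kthread.  The timer and boost
 * paths only record a per-CPU target and wake this thread, which then
 * applies the highest target among the CPUs of each affected policy,
 * keeping the (possibly slow) frequency transition out of timer context.
 */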
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
							   pcpu->target_freq,
							   pcpu->policy->cur);
		}
	}

	return 0;
}

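/*
 * Boost all online CPUs to at least hispeed_freq, and reset the floor so
 * speed will not drop below hispeed_freq for min_sample_time.
 */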
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			pcpu->hispeed_validate_time = pcpu->target_set_time;
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

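/*
 * Governor entry point: GOV_START initializes per-CPU state and arms the
 * per-CPU timers (registering the sysfs group and idle notifier on first
 * use), GOV_STOP tears that down, and GOV_LIMITS clamps the current speed
 * into the new policy range.
 */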
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					&pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time = pcpu->target_set_time;
			pcpu->hispeed_validate_time = pcpu->target_set_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");