/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

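/*
 * Per-CPU governor state: the sampling timer, idle-time snapshots used
 * to compute load, the current target frequency, and the timestamps
 * that rate-limit ramp-downs (floor) and further ramp-ups (hispeed).
 */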
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load bursts (default: policy max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

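/*
 * Re-arm the per-CPU sampling timer and snapshot the idle counters that
 * the next short-term load calculation will be measured against.
 */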
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
}

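/*
 * Sampling timer callback: compute this CPU's load and pick a new target
 * frequency.  Load is the greater of the short-term figure (since the
 * timer was last re-armed) and the long-term figure (since the last
 * frequency change), so a sustained busy period is not masked by one
 * mostly-idle sample.  The actual transition is delegated to the
 * speedchange thread.
 */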
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	now_idle = get_cpu_idle_time_us(data, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
	delta_time = (unsigned int)(now - pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	if (cpu_load >= go_hispeed_load || boost_val) {
		if (pcpu->target_freq < hispeed_freq &&
		    hispeed_freq < pcpu->policy->max) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;

			if (pcpu->target_freq == hispeed_freq &&
			    new_freq > hispeed_freq &&
			    now - pcpu->hispeed_validate_time
			    < above_hispeed_delay_val) {
				trace_cpufreq_interactive_notyet(data, cpu_load,
							pcpu->target_freq,
							new_freq);
				goto rearm;
			}
		}
	} else {
		new_freq = hispeed_freq * cpu_load / 100;
	}

	if (new_freq <= hispeed_freq)
		pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(data, cpu_load,
					pcpu->target_freq, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(data, cpu_load,
						  pcpu->target_freq, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = now;

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already at max speed with no need to change it; wait until the
	 * next idle to re-evaluate.  No timer is needed until then.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}

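/*
 * Idle-start hook: when entering idle above the minimum speed, make sure
 * a timer is pending so this idle CPU cannot pin sibling CPUs at an
 * elevated speed indefinitely; when at minimum with governidle set,
 * cancel a pending timer that was flagged as cancelable on idle.
 */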
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}

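/*
 * Idle-end hook: ensure a sampling timer is pending again; if a
 * deferrable timer already expired while this CPU slept, evaluate the
 * load immediately instead of waiting for the next tick.
 */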
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

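/*
 * Frequency-change worker: a realtime kthread that waits for CPUs to be
 * flagged in speedchange_cpumask and then drives each flagged CPU's
 * policy to the highest target_freq among the CPUs sharing that policy.
 */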
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);
		}
	}

	return 0;
}

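/*
 * Boost: raise every online CPU below hispeed_freq up to it, and make
 * hispeed_freq the ramp-down floor so the boost holds for at least
 * min_sample_time.
 */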
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			pcpu->hispeed_validate_time = pcpu->target_set_time;
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

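/*
 * sysfs tunables.  Given the attribute group named "interactive" below,
 * these typically appear under /sys/devices/system/cpu/cpufreq/interactive/.
 */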
static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);


static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

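/*
 * Governor entry point: GOV_START arms the per-CPU timers and, for the
 * first policy only, registers the idle notifier and sysfs group;
 * GOV_STOP reverses this; GOV_LIMITS clamps the current speed into the
 * new policy bounds.
 */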
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					&pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				pcpu->target_set_time;
			pcpu->hispeed_validate_time =
				pcpu->target_set_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

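/*
 * Module init: set tunable defaults, initialize the per-CPU timers
 * (deferrable unless governidle is set, so idle CPUs are not woken just
 * to lower their speed), and start the speedchange thread as SCHED_FIFO.
 */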
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");