/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */

static int boost_val;
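
/*
 * Illustrative usage (not part of the driver logic): with the sysfs group
 * registered below under cpufreq_global_kobject, these tunables typically
 * appear under /sys/devices/system/cpu/cpufreq/interactive/ (exact path may
 * vary by kernel configuration).  For example:
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 *
 * would raise the hispeed trigger threshold to 90% load and shorten the
 * ramp-down hold time to 40 ms (time values are in microseconds).
 */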

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};
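
/*
 * Example (assumes the standard cpufreq sysfs layout): once this governor
 * is built in or loaded, it can be selected per policy with:
 *
 *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */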

static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &now);
        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_time = (unsigned int)(now - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(now - pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;
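
        /*
         * Worked example (illustrative numbers): with the default 20 ms
         * timer_rate, delta_time is roughly 20000 us; if the CPU was idle
         * for 5000 us of that window, then
         * cpu_load = 100 * (20000 - 5000) / 20000 = 75%.
         */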

        if (cpu_load >= go_hispeed_load || boost_val) {
                if (pcpu->target_freq < hispeed_freq &&
                    hispeed_freq < pcpu->policy->max) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            now - pcpu->hispeed_validate_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = hispeed_freq * cpu_load / 100;
        }

        if (new_freq <= hispeed_freq)
                pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                        pcpu->target_freq, new_freq);
                        goto rearm;
                }
        }
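
        /*
         * Example: after a boost sets the floor to hispeed_freq, a request
         * to drop to a lower step is deferred until min_sample_time (80 ms
         * by default) has elapsed since the floor was last validated, which
         * smooths out brief dips in load.
         */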

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = now;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = now;

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min, cancel the timer if that CPU goes idle.
                 * We don't need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min)
                        pcpu->timer_idlecancel = 1;

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer_pinned(&pcpu->cpu_timer,
                                 jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer_pinned(
                                &pcpu->cpu_timer,
                                jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        pcpu->timer_idlecancel = 0;
                }
        }
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer_pinned(
                        &pcpu->cpu_timer,
                        jiffies + usecs_to_jiffies(timer_rate));
        }
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }
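
                        /*
                         * Example: if CPUs 0 and 1 share this policy and
                         * their per-cpu targets are 600000 kHz and
                         * 1000000 kHz, the policy is driven to 1000000 kHz,
                         * the maximum requested by any sibling.
                         */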

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                           pcpu->target_freq,
                                                           pcpu->policy->cur);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */
                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);
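
/*
 * Illustrative usage (paths assume the default global sysfs group):
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boost       # hold
 *   echo 0 > /sys/devices/system/cpu/cpufreq/interactive/boost       # release
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse  # one-shot
 *
 * boostpulse raises online CPUs to hispeed_freq once; the floor then decays
 * after min_sample_time, whereas boost holds the floor until cleared.
 */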

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                        pcpu->cpu_timer.expires =
                                jiffies + usecs_to_jiffies(timer_rate);
                        add_timer_on(&pcpu->cpu_timer, j);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                        &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                   &interactive_attr_group);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
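
/*
 * Note (illustrative): when CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is set,
 * the governor must be registered before cpufreq drivers probe, hence the
 * earlier fs_initcall() level; otherwise ordinary module_init() ordering
 * suffices.
 */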

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency sensitive workloads");
MODULE_LICENSE("GPL");