/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include <asm/cputime.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 timer_run_time;
	int idling;
	u64 freq_change_time;
	u64 freq_change_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* Realtime thread handles frequency scaling up; workqueue handles scaling down. */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;

/* Hi speed to bump to from lo speed when a load burst is seen (default: policy max). */
static u64 hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 95
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (20 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to re-evaluate CPU load.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

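/*
 * Per-CPU sampling timer: compute CPU load both since the last idle sample
 * (short term) and since the last frequency change (long term), take the
 * greater of the two, pick a new target speed from the frequency table, and
 * hand the change off to the realtime up task or the scale-down workqueue.
 */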
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time)
		goto exit;

	delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int) cputime64_sub(now_idle,
						  pcpu->freq_change_time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  pcpu->freq_change_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

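	/*
	 * Bump straight to hispeed_freq on a load burst from minimum speed;
	 * otherwise scale the target in proportion to the measured load.
	 */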
	if (cpu_load >= go_hispeed_load) {
		if (pcpu->policy->cur == pcpu->policy->min)
			new_freq = hispeed_freq;
		else
			new_freq = pcpu->policy->max * cpu_load / 100;
	} else {
		new_freq = pcpu->policy->cur * cpu_load / 100;
	}

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq == new_freq)
		goto rearm_if_notmax;

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (new_freq < pcpu->target_freq) {
		if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time)
		    < min_sample_time)
			goto rearm;
	}

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling)
				goto exit;

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}

exit:
	return;
}

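/*
 * Idle-entry hook: mark this CPU as idling, then either arm the sampling
 * timer (so an idle CPU above minimum speed doesn't pin the other CPUs in
 * its policy there) or cancel a now-unneeded timer when already at minimum.
 */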
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pcpu->idling = 1;
	smp_wmb();
	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer,
				  jiffies + usecs_to_jiffies(timer_rate));
		}
#endif
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}
}

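/*
 * Idle-exit hook: clear the idling flag and re-arm the sampling timer if it
 * isn't pending and the previous load sample has already been processed.
 */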
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	pcpu->idling = 0;
	smp_wmb();

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval.  (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU.  Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
	    pcpu->governor_enabled) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}
}

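/*
 * Realtime kthread that raises speed for CPUs flagged in up_cpumask, driving
 * each policy to the highest target frequency among its CPUs.
 */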
static int cpufreq_interactive_up_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&up_cpumask_lock, flags);

		if (cpumask_empty(&up_cpumask)) {
			spin_unlock_irqrestore(&up_cpumask_lock, flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&up_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = up_cpumask;
		cpumask_clear(&up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			mutex_lock(&set_speed_lock);

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			mutex_unlock(&set_speed_lock);

			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(cpu,
						     &pcpu->freq_change_time);
		}
	}

	return 0;
}

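/*
 * Workqueue handler that lowers speed for CPUs flagged in down_cpumask; a
 * policy only drops once every CPU in it has a lower target frequency.
 */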
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&down_cpumask_lock, flags);
	tmp_mask = down_cpumask;
	cpumask_clear(&down_cpumask);
	spin_unlock_irqrestore(&down_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		unsigned int j;
		unsigned int max_freq = 0;

		pcpu = &per_cpu(cpuinfo, cpu);
		smp_rmb();

		if (!pcpu->governor_enabled)
			continue;

		mutex_lock(&set_speed_lock);

		for_each_cpu(j, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, j);

			if (pjcpu->target_freq > max_freq)
				max_freq = pjcpu->target_freq;
		}

		if (max_freq != pcpu->policy->cur)
			__cpufreq_driver_target(pcpu->policy, max_freq,
						CPUFREQ_RELATION_H);

		mutex_unlock(&set_speed_lock);
		pcpu->freq_change_time_in_idle =
			get_cpu_idle_time_us(cpu,
					     &pcpu->freq_change_time);
	}
}

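/*
 * sysfs tunables, exported through the "interactive" attribute group on
 * cpufreq_global_kobject.
 */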
static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	u64 val;

	ret = strict_strtoull(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

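/*
 * Governor entry point: GOV_START initializes per-CPU state and, on first
 * use, creates the sysfs tunables; GOV_STOP disables the per-CPU timers and
 * removes the tunables on last use; GOV_LIMITS clamps the current speed to
 * the new policy limits.
 */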
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table = cpufreq_frequency_get_table(policy->cpu);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(j,
						     &pcpu->freq_change_time);
			pcpu->governor_enabled = 1;
			smp_wmb();
		}

		if (!hispeed_freq)
			hispeed_freq = policy->max;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);

			/*
			 * Reset idle exit time since we may cancel the timer
			 * before it can run after the last idle exit time,
			 * to avoid tripping the check in idle exit for a timer
			 * that is trying to run.
			 */
			pcpu->idle_exit_time = 0;
		}

		flush_work(&freq_scale_down_work);
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

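/* Dispatch idle start/end notifications to the handlers above. */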
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

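/*
 * Module init: set tunable defaults, set up the per-CPU timers, spawn the
 * SCHED_FIFO up task and the scale-down workqueue, register the idle
 * notifier, and register the governor with cpufreq.
 */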
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
				 "kinteractiveup");
	if (IS_ERR(up_task))
		return PTR_ERR(up_task);

	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
	get_task_struct(up_task);

	/* No rescuer thread, bind to CPU queuing the work for possibly
	   warm cache (probably doesn't matter much). */
	down_wq = alloc_workqueue("knteractive_down", 0, 1);

	if (!down_wq)
		goto err_freeuptask;

	INIT_WORK(&freq_scale_down_work, cpufreq_interactive_freq_down);

	spin_lock_init(&up_cpumask_lock);
	spin_lock_init(&down_cpumask_lock);
	mutex_init(&set_speed_lock);

	idle_notifier_register(&cpufreq_interactive_idle_nb);

	return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
	put_task_struct(up_task);
	return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(up_task);
	put_task_struct(up_task);
	destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");