/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include <asm/cputime.h>

static atomic_t active_count = ATOMIC_INIT(0);

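/*
 * Per-CPU bookkeeping for the governor: time_in_idle and idle_exit_time
 * (cumulative idle time and timestamp, both in microseconds) mark the start
 * of the current sample window; timer_run_time is the timestamp of the last
 * sampling timer run; freq_change_time and freq_change_time_in_idle record
 * the same pair at the last speed change and feed the long-term load
 * estimate; timer_idlecancel flags a timer that should be cancelled if the
 * CPU goes idle while already at minimum speed.
 */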
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 timer_run_time;
	int idling;
	u64 freq_change_time;
	u64 freq_change_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* Realtime kthread handles speed increases; workqueue handles decreases */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;

/* Go to max speed when CPU load at or above this value. */
#define DEFAULT_GO_MAXSPEED_LOAD 85
static unsigned long go_maxspeed_load;

/*
 * The minimum amount of time (in microseconds) to spend at a frequency
 * before we can ramp down; 80000 us = 80 ms by default.
 */
#define DEFAULT_MIN_SAMPLE_TIME 80000
static unsigned long min_sample_time;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000, /* 10 ms */
	.owner = THIS_MODULE,
};

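/*
 * Per-CPU sampling timer: compute the load over the most recent sample
 * window and since the last speed change, pick a new target frequency,
 * then hand the transition off to the up_task kthread (speed increases)
 * or the down_wq workqueue (speed decreases).
 */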
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time)
		goto exit;

	delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;
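	/*
	 * Example: if 2000 us of a 10000 us sample window were idle,
	 * cpu_load = 100 * (10000 - 2000) / 10000 = 80 (percent).
	 */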

	delta_idle = (unsigned int) cputime64_sub(now_idle,
						  pcpu->freq_change_time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  pcpu->freq_change_time);

	if (delta_idle > delta_time)
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	if (cpu_load >= go_maxspeed_load)
		new_freq = pcpu->policy->max;
	else
		new_freq = pcpu->policy->max * cpu_load / 100;
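	/*
	 * Example: with go_maxspeed_load = 85 and policy->max = 1000000 kHz,
	 * a load of 90 requests the maximum directly, while a load of 60
	 * requests 1000000 * 60 / 100 = 600000 kHz, rounded to a supported
	 * step by the frequency table lookup below.
	 */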

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq == new_freq)
		goto rearm_if_notmax;

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (new_freq < pcpu->target_freq) {
		if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
		    min_sample_time)
			goto rearm;
	}

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling)
				goto exit;

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer, jiffies + 2);
	}

exit:
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled) {
		return;
	}

	pcpu->idling = 1;
	smp_wmb();
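	/*
	 * Pairs with the smp_rmb() in cpufreq_interactive_timer() before it
	 * checks pcpu->idling, so a timer running at minimum speed can see
	 * that this CPU has gone idle and skip re-arming itself.
	 */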
	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer, jiffies + 2);
		}
#endif
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}

}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	pcpu->idling = 0;
	smp_wmb();

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval.  (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU.  Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
	    pcpu->governor_enabled) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer, jiffies + 2);
	}

}

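/*
 * SCHED_FIFO kthread (created in cpufreq_interactive_init) that services
 * speed increases: CPUs flagged in up_cpumask by the timer are driven to
 * their new target_freq here, so ramp-ups are not queued behind other work.
 */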
static int cpufreq_interactive_up_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&up_cpumask_lock, flags);

		if (cpumask_empty(&up_cpumask)) {
			spin_unlock_irqrestore(&up_cpumask_lock, flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&up_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);

		tmp_mask = up_cpumask;
		cpumask_clear(&up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			pcpu = &per_cpu(cpuinfo, cpu);

			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			__cpufreq_driver_target(pcpu->policy,
						pcpu->target_freq,
						CPUFREQ_RELATION_H);
			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(cpu,
						     &pcpu->freq_change_time);
		}
	}

	return 0;
}

static void cpufreq_interactive_freq_down(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&down_cpumask_lock, flags);
	tmp_mask = down_cpumask;
	cpumask_clear(&down_cpumask);
	spin_unlock_irqrestore(&down_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		pcpu = &per_cpu(cpuinfo, cpu);

		smp_rmb();

		if (!pcpu->governor_enabled)
			continue;

		__cpufreq_driver_target(pcpu->policy,
					pcpu->target_freq,
					CPUFREQ_RELATION_H);
		pcpu->freq_change_time_in_idle =
			get_cpu_idle_time_us(cpu,
					     &pcpu->freq_change_time);
	}
}

static ssize_t show_go_maxspeed_load(struct kobject *kobj,
				     struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_maxspeed_load);
}

static ssize_t store_go_maxspeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret = strict_strtoul(buf, 0, &go_maxspeed_load);

	/* On success return count so the sysfs write is not retried. */
	return ret < 0 ? ret : count;
}

static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644,
		show_go_maxspeed_load, store_go_maxspeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret = strict_strtoul(buf, 0, &min_sample_time);

	/* On success return count so the sysfs write is not retried. */
	return ret < 0 ? ret : count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static struct attribute *interactive_attributes[] = {
	&go_maxspeed_load_attr.attr,
	&min_sample_time_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
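/*
 * With the group registered on cpufreq_global_kobject these tunables
 * normally appear under /sys/devices/system/cpu/cpufreq/interactive/,
 * e.g. (paths may vary by platform):
 *   echo 90    > /sys/devices/system/cpu/cpufreq/interactive/go_maxspeed_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 */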

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(j,
						     &pcpu->freq_change_time);
			pcpu->governor_enabled = 1;
			smp_wmb();
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);

			/*
			 * Reset idle exit time since we may cancel the timer
			 * before it can run after the last idle exit time,
			 * to avoid tripping the check in idle exit for a timer
			 * that is trying to run.
			 */
			pcpu->idle_exit_time = 0;
		}

		flush_work(&freq_scale_down_work);
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
				 "kinteractiveup");
	if (IS_ERR(up_task))
		return PTR_ERR(up_task);

	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
	get_task_struct(up_task);

	/* No rescuer thread, bind to CPU queuing the work for possibly
	   warm cache (probably doesn't matter much). */
	down_wq = alloc_workqueue("knteractive_down", 0, 1);

	if (!down_wq)
		goto err_freeuptask;

	INIT_WORK(&freq_scale_down_work,
		  cpufreq_interactive_freq_down);

	spin_lock_init(&up_cpumask_lock);
	spin_lock_init(&down_cpumask_lock);

	idle_notifier_register(&cpufreq_interactive_idle_nb);

	return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
	put_task_struct(up_task);
	return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(up_task);
	put_task_struct(up_task);
	destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");