/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include <asm/cputime.h>

static void (*pm_idle_old)(void);
static atomic_t active_count = ATOMIC_INIT(0);

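/*
 * Per-CPU governor state: the load sampling timer, the idle time
 * snapshots used to compute CPU load, and the most recently requested
 * target frequency for this CPU.
 */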
struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 freq_change_time;
        u64 freq_change_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/*
 * Frequency changes are applied from process context: speed increases
 * are handled by a realtime kthread (up_task) to keep ramp-up latency
 * low, while speed decreases go through a regular workqueue.
 */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;

/* Go to max speed when CPU load at or above this value. */
#define DEFAULT_GO_MAXSPEED_LOAD 85
static unsigned long go_maxspeed_load;

/*
 * The minimum amount of time (in usecs) to spend at a frequency before
 * we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME 80000
static unsigned long min_sample_time;

#define DEBUG 0
#define BUFSZ 128

#if DEBUG
#include <linux/proc_fs.h>

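/*
 * Debug instrumentation: dbgpr() records governor events in a small
 * ring buffer, which is dumped to the kernel log when the /proc/igov
 * entry (created in cpufreq_interactive_init) is read.
 */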
struct dbgln {
        int cpu;
        unsigned long jiffy;
        unsigned long run;
        char buf[BUFSZ];
};

#define NDBGLNS 256

static struct dbgln dbgbuf[NDBGLNS];
static int dbgbufs;
static int dbgbufe;
static struct proc_dir_entry *dbg_proc;
static spinlock_t dbgpr_lock;

static u64 up_request_time;
static unsigned int up_max_latency;

static void dbgpr(char *fmt, ...)
{
        va_list args;
        int n;
        unsigned long flags;

        spin_lock_irqsave(&dbgpr_lock, flags);
        n = dbgbufe;
        va_start(args, fmt);
        vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args);
        va_end(args);
        dbgbuf[n].cpu = smp_processor_id();
        dbgbuf[n].run = nr_running();
        dbgbuf[n].jiffy = jiffies;

        if (++dbgbufe >= NDBGLNS)
                dbgbufe = 0;

        if (dbgbufe == dbgbufs)
                if (++dbgbufs >= NDBGLNS)
                        dbgbufs = 0;

        spin_unlock_irqrestore(&dbgpr_lock, flags);
}

static void dbgdump(void)
{
        int i, j;
        unsigned long flags;
        static struct dbgln prbuf[NDBGLNS];

        spin_lock_irqsave(&dbgpr_lock, flags);
        i = dbgbufs;
        j = dbgbufe;
        memcpy(prbuf, dbgbuf, sizeof(dbgbuf));
        dbgbufs = 0;
        dbgbufe = 0;
        spin_unlock_irqrestore(&dbgpr_lock, flags);

        while (i != j) {
                printk("%lu %d %lu %s",
                       prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run,
                       prbuf[i].buf);
                if (++i == NDBGLNS)
                        i = 0;
        }
}

static int dbg_proc_read(char *buffer, char **start, off_t offset,
                         int count, int *peof, void *dat)
{
        printk("max up_task latency=%uus\n", up_max_latency);
        dbgdump();
        *peof = 1;
        return 0;
}

#else
#define dbgpr(...) do {} while (0)
#endif

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unicode event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

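/*
 * Once cpufreq_gov_interactive above is registered, the governor is
 * selected per policy through the standard cpufreq sysfs interface,
 * typically (exact path may vary by platform):
 *
 *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */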
static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        /* Pairs with the smp_wmb() after governor_enabled updates. */
        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time) {
                dbgpr("timer %d: no valid idle exit sample\n", (int) data);
                goto exit;
        }

#if DEBUG
        if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10)
                dbgpr("timer %d: late by %d ticks\n",
                      (int) data, jiffies - pcpu->cpu_timer.expires);
#endif

        delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
        delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
                                                  idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000) {
                dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n",
                      (int) data, delta_time, idle_exit_time,
                      pcpu->timer_run_time);
                goto rearm;
        }

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int) cputime64_sub(now_idle,
                                                  pcpu->freq_change_time_in_idle);
        delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
                                                  pcpu->freq_change_time);

        if (delta_idle > delta_time)
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if (cpu_load >= go_maxspeed_load)
                new_freq = pcpu->policy->max;
        else
                new_freq = pcpu->policy->max * cpu_load / 100;

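        /*
         * Illustrative example: with delta_time = 20000 usec and
         * delta_idle = 5000 usec, cpu_load = 100 * 15000 / 20000 = 75.
         * That is below the default go_maxspeed_load of 85, so the raw
         * target is policy->max * 75 / 100, which is then snapped to a
         * real table frequency below (CPUFREQ_RELATION_H selects the
         * highest table frequency at or below the target).
         */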
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                dbgpr("timer %d: cpufreq_frequency_table_target error\n",
                      (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        if (pcpu->target_freq == new_freq) {
                dbgpr("timer %d: load=%d, already at %d\n",
                      (int) data, cpu_load, new_freq);
                goto rearm_if_notmax;
        }

        /*
         * Do not scale down unless we have been at this frequency for the
         * minimum sample time.
         */
        if (new_freq < pcpu->target_freq) {
                if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time)
                    < min_sample_time) {
                        dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n",
                              (int) data, cpu_load, pcpu->target_freq,
                              new_freq);
                        goto rearm;
                }
        }

        dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n",
              (int) data, cpu_load, pcpu->target_freq, new_freq);

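        /*
         * Hand the actual frequency change off to process context: drops
         * are queued to the down workqueue, rises wake the realtime
         * up_task so ramping up happens with minimal latency.
         */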
        if (new_freq < pcpu->target_freq) {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&down_cpumask_lock, flags);
                cpumask_set_cpu(data, &down_cpumask);
                spin_unlock_irqrestore(&down_cpumask_lock, flags);
                queue_work(down_wq, &freq_scale_down_work);
        } else {
                pcpu->target_freq = new_freq;
#if DEBUG
                up_request_time = ktime_to_us(ktime_get());
#endif
                spin_lock_irqsave(&up_cpumask_lock, flags);
                cpumask_set_cpu(data, &up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);
                wake_up_process(up_task);
        }

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling) {
                                dbgpr("timer %d: cpu idle, don't re-arm\n",
                                      (int) data);
                                goto exit;
                        }

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer, jiffies + 2);
                dbgpr("timer %d: set timer for %lu exit=%llu\n",
                      (int) data, pcpu->cpu_timer.expires,
                      pcpu->idle_exit_time);
        }

exit:
        return;
}

static void cpufreq_interactive_idle(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled) {
                pm_idle_old();
                return;
        }

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer, jiffies + 2);
                        dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n",
                              pcpu->target_freq, pcpu->cpu_timer.expires,
                              pcpu->idle_exit_time);
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        dbgpr("idle: cancel timer for %lu\n",
                              pcpu->cpu_timer.expires);
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }

        pm_idle_old();
        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer, jiffies + 2);
                dbgpr("idle: exit, set timer for %lu exit=%llu\n",
                      pcpu->cpu_timer.expires, pcpu->idle_exit_time);
#if DEBUG
        } else if (timer_pending(&pcpu->cpu_timer) == 0 &&
                   pcpu->timer_run_time < pcpu->idle_exit_time) {
                dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n",
                      pcpu->idle_exit_time, pcpu->timer_run_time);
#endif
        }
}

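/*
 * Realtime kthread that applies the pending speed increases for any CPUs
 * flagged in up_cpumask.
 */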
static int cpufreq_interactive_up_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

#if DEBUG
        u64 now;
        u64 then;
        unsigned int lat;
#endif

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&up_cpumask_lock, flags);

                if (cpumask_empty(&up_cpumask)) {
                        spin_unlock_irqrestore(&up_cpumask_lock, flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&up_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);

#if DEBUG
                then = up_request_time;
                now = ktime_to_us(ktime_get());

                if (now > then) {
                        lat = ktime_to_us(ktime_get()) - then;

                        if (lat > up_max_latency)
                                up_max_latency = lat;
                }
#endif

                tmp_mask = up_cpumask;
                cpumask_clear(&up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        pcpu = &per_cpu(cpuinfo, cpu);

                        if (nr_running() == 1) {
                                dbgpr("up %d: tgt=%d nothing else running\n",
                                      cpu, pcpu->target_freq);
                        }

                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        __cpufreq_driver_target(pcpu->policy,
                                                pcpu->target_freq,
                                                CPUFREQ_RELATION_H);
                        pcpu->freq_change_time_in_idle =
                                get_cpu_idle_time_us(cpu,
                                                     &pcpu->freq_change_time);
                        dbgpr("up %d: set tgt=%d (actual=%d)\n",
                              cpu, pcpu->target_freq, pcpu->policy->cur);
                }
        }

        return 0;
}

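/*
 * Workqueue callback that applies the pending speed decreases for any
 * CPUs flagged in down_cpumask.
 */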
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&down_cpumask_lock, flags);
        tmp_mask = down_cpumask;
        cpumask_clear(&down_cpumask);
        spin_unlock_irqrestore(&down_cpumask_lock, flags);

        for_each_cpu(cpu, &tmp_mask) {
                pcpu = &per_cpu(cpuinfo, cpu);

                smp_rmb();

                if (!pcpu->governor_enabled)
                        continue;

                __cpufreq_driver_target(pcpu->policy,
                                        pcpu->target_freq,
                                        CPUFREQ_RELATION_H);
                pcpu->freq_change_time_in_idle =
                        get_cpu_idle_time_us(cpu,
                                             &pcpu->freq_change_time);
                dbgpr("down %d: set tgt=%d (actual=%d)\n",
                      cpu, pcpu->target_freq, pcpu->policy->cur);
        }
}

static ssize_t show_go_maxspeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_maxspeed_load);
}

static ssize_t store_go_maxspeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;

        ret = strict_strtoul(buf, 0, &go_maxspeed_load);
        if (ret < 0)
                return ret;
        return count;
}

static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644,
                show_go_maxspeed_load, store_go_maxspeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;

        ret = strict_strtoul(buf, 0, &min_sample_time);
        if (ret < 0)
                return ret;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static struct attribute *interactive_attributes[] = {
        &go_maxspeed_load_attr.attr,
        &min_sample_time_attr.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

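/*
 * Governor event callback.  On the first CPUFREQ_GOV_START the sysfs
 * group above is created under cpufreq_global_kobject, so the tunables
 * typically appear as (exact path may vary by platform):
 *
 *   /sys/devices/system/cpu/cpufreq/interactive/go_maxspeed_load
 *   /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 *
 * and the pm_idle hook is redirected to cpufreq_interactive_idle.
 */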
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->freq_change_time_in_idle =
                                get_cpu_idle_time_us(j,
                                                     &pcpu->freq_change_time);
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                pm_idle_old = pm_idle;
                pm_idle = cpufreq_interactive_idle;
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                flush_work(&freq_scale_down_work);
                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                pm_idle = pm_idle_old;
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        up_task = kthread_create(cpufreq_interactive_up_task, NULL,
                                 "kinteractiveup");
        if (IS_ERR(up_task))
                return PTR_ERR(up_task);

        sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
        get_task_struct(up_task);

        /* No rescuer thread, bind to CPU queuing the work for possibly
           warm cache (probably doesn't matter much). */
        down_wq = alloc_workqueue("knteractive_down", 0, 1);

        if (!down_wq)
                goto err_freeuptask;

        INIT_WORK(&freq_scale_down_work,
                  cpufreq_interactive_freq_down);

        spin_lock_init(&up_cpumask_lock);
        spin_lock_init(&down_cpumask_lock);

#if DEBUG
        spin_lock_init(&dbgpr_lock);
        dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL);
        dbg_proc->read_proc = dbg_proc_read;
#endif

        return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
        put_task_struct(up_task);
        return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(up_task);
        put_task_struct(up_task);
        destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");