/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include <asm/cputime.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;    /* per-CPU load sampling timer */
        int timer_idlecancel;           /* cancel pending timer if this CPU idles */
        u64 time_in_idle;               /* idle time at start of current sample */
        u64 idle_exit_time;             /* timestamp of current sample start */
        u64 timer_run_time;             /* timestamp of last timer run */
        int idling;                     /* nonzero while this CPU is in idle */
        u64 freq_change_time;           /* timestamp of last speed change */
        u64 freq_change_time_in_idle;   /* idle time at last speed change */
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;       /* speed this CPU has requested */
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* A realtime kthread handles speed-up requests; a workqueue handles speed-down. */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;

/* Go to max speed when CPU load at or above this value. */
#define DEFAULT_GO_MAXSPEED_LOAD 85
static unsigned long go_maxspeed_load;

/*
 * The minimum amount of time (in usecs) to spend at a frequency before
 * we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME 80000
static unsigned long min_sample_time;

/*
 * The sample rate (in usecs) of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE 30000
static unsigned long timer_rate;

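/*
 * The tunables above are exported through the "interactive" attribute
 * group created below on cpufreq_global_kobject, which on a typical
 * kernel appears as /sys/devices/system/cpu/cpufreq/interactive/.
 * Illustrative usage from userspace (values and exact paths may differ
 * by platform):
 *
 *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   echo 90    > /sys/devices/system/cpu/cpufreq/interactive/go_maxspeed_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 *   echo 20000 > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
 */
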
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

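/*
 * A rough sketch of the speed decision made by the timer below, using
 * made-up numbers: if a sample shows delta_time = 30000 usecs and
 * delta_idle = 6000 usecs, the short-term load is
 * 100 * (30000 - 6000) / 30000 = 80%.  That is below the default
 * go_maxspeed_load of 85, so the requested speed is
 * policy->max * 80 / 100, rounded down to a real table entry via
 * CPUFREQ_RELATION_H.  The long-term load since the last speed change
 * is computed the same way, and the larger of the two loads wins.
 */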
static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
        delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
                                                  idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int) cputime64_sub(now_idle,
                                                  pcpu->freq_change_time_in_idle);
        delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
                                                  pcpu->freq_change_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if (cpu_load >= go_maxspeed_load)
                new_freq = pcpu->policy->max;
        else
                new_freq = pcpu->policy->max * cpu_load / 100;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        if (pcpu->target_freq == new_freq)
                goto rearm_if_notmax;

        /*
         * Do not scale down unless we have been at this frequency for the
         * minimum sample time.
         */
        if (new_freq < pcpu->target_freq) {
                if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time)
                    < min_sample_time)
                        goto rearm;
        }

        if (new_freq < pcpu->target_freq) {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&down_cpumask_lock, flags);
                cpumask_set_cpu(data, &down_cpumask);
                spin_unlock_irqrestore(&down_cpumask_lock, flags);
                queue_work(down_wq, &freq_scale_down_work);
        } else {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&up_cpumask_lock, flags);
                cpumask_set_cpu(data, &up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);
                wake_up_process(up_task);
        }

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }

}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer to fire timer_rate usecs from now if not already
         * pending, and if the timer function has already processed the
         * previous load sampling interval.  (If the timer is not pending
         * but has not processed the previous interval, it is probably
         * racing with us on another CPU.  Let it compute load based on
         * the previous sample and then re-arm the timer for another
         * interval when it's done, rather than updating the interval
         * start time to be "now", which doesn't give the timer function
         * enough time to make a decision on this run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

}

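/*
 * Design note: speed increases are serviced by a dedicated SCHED_FIFO
 * kthread (woken directly from the timer) so ramping up is not delayed
 * behind other deferred work, while decreases are less latency-sensitive
 * and go through an ordinary workqueue.  Both paths take set_speed_lock
 * and drive the whole policy to the highest target_freq requested by
 * any CPU sharing that policy.
 */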
static int cpufreq_interactive_up_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&up_cpumask_lock, flags);

                if (cpumask_empty(&up_cpumask)) {
                        spin_unlock_irqrestore(&up_cpumask_lock, flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&up_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = up_cpumask;
                cpumask_clear(&up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        mutex_lock(&set_speed_lock);

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        mutex_unlock(&set_speed_lock);

                        pcpu->freq_change_time_in_idle =
                                get_cpu_idle_time_us(cpu,
                                                     &pcpu->freq_change_time);
                }
        }

        return 0;
}

static void cpufreq_interactive_freq_down(struct work_struct *work)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&down_cpumask_lock, flags);
        tmp_mask = down_cpumask;
        cpumask_clear(&down_cpumask);
        spin_unlock_irqrestore(&down_cpumask_lock, flags);

        for_each_cpu(cpu, &tmp_mask) {
                unsigned int j;
                unsigned int max_freq = 0;

                pcpu = &per_cpu(cpuinfo, cpu);
                smp_rmb();

                if (!pcpu->governor_enabled)
                        continue;

                mutex_lock(&set_speed_lock);

                for_each_cpu(j, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, j);

                        if (pjcpu->target_freq > max_freq)
                                max_freq = pjcpu->target_freq;
                }

                if (max_freq != pcpu->policy->cur)
                        __cpufreq_driver_target(pcpu->policy, max_freq,
                                                CPUFREQ_RELATION_H);

                mutex_unlock(&set_speed_lock);
                pcpu->freq_change_time_in_idle =
                        get_cpu_idle_time_us(cpu,
                                             &pcpu->freq_change_time);
        }
}

static ssize_t show_go_maxspeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_maxspeed_load);
}

static ssize_t store_go_maxspeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_maxspeed_load = val;
        return count;
}

static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644,
                show_go_maxspeed_load, store_go_maxspeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static struct attribute *interactive_attributes[] = {
        &go_maxspeed_load_attr.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

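/*
 * Governor callback invoked by the cpufreq core.  In this kernel
 * generation the core passes one of three events: CPUFREQ_GOV_START
 * when the governor is attached to a policy (set up per-CPU state and,
 * on first use, the sysfs group), CPUFREQ_GOV_STOP when it is detached
 * (disable and drain the timers, and remove the sysfs group once no
 * policy uses the governor), and CPUFREQ_GOV_LIMITS when the policy's
 * min/max limits change and the current speed must be clamped to them.
 */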
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->freq_change_time_in_idle =
                                get_cpu_idle_time_us(j,
                                                     &pcpu->freq_change_time);
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                flush_work(&freq_scale_down_work);
                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

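/*
 * Portability note: the IDLE_START/IDLE_END idle notifier used above
 * (registered via idle_notifier_register() below) is an extension found
 * in Android kernel trees of this era rather than in mainline, so this
 * governor assumes such a tree.
 */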
static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        up_task = kthread_create(cpufreq_interactive_up_task, NULL,
                                 "kinteractiveup");
        if (IS_ERR(up_task))
                return PTR_ERR(up_task);

        sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
        get_task_struct(up_task);

        /* No rescuer thread, bind to CPU queuing the work for possibly
           warm cache (probably doesn't matter much). */
        down_wq = alloc_workqueue("kinteractive_down", 0, 1);

        if (!down_wq)
                goto err_freeuptask;

        INIT_WORK(&freq_scale_down_work,
                  cpufreq_interactive_freq_down);

        spin_lock_init(&up_cpumask_lock);
        spin_lock_init(&down_cpumask_lock);
        mutex_init(&set_speed_lock);

        idle_notifier_register(&cpufreq_interactive_idle_nb);

        return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
        put_task_struct(up_task);
        return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(up_task);
        put_task_struct(up_task);
        destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");