/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include <asm/cputime.h>

static atomic_t active_count = ATOMIC_INIT(0);

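/*
 * Per-CPU state: idle-time samples used to compute load, the timer that
 * drives re-evaluation, and the frequency this CPU is asking for.
 */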
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 timer_run_time;
	int idling;
	u64 freq_change_time;
	u64 freq_change_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* Realtime kthread handles scaling up; workqueue handles scaling down. */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;

/* Go to max speed when CPU load at or above this value. */
#define DEFAULT_GO_MAXSPEED_LOAD 95
static unsigned long go_maxspeed_load;

/*
 * The minimum amount of time (usecs) to spend at a frequency before we can
 * ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME 20000
static unsigned long min_sample_time;

/*
 * The sample rate (usecs) of the timer used to re-evaluate CPU load and
 * raise frequency.
 */
#define DEFAULT_TIMER_RATE 10000
static unsigned long timer_rate;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

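/*
 * Per-CPU timer function: sample how long this CPU has been idle, compute
 * the load both since the current sample started and since the last speed
 * change, take the greater of the two, and map that load to a target
 * frequency.  Actual speed changes are handed off to the realtime "up"
 * thread or the "down" workqueue.
 */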
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time)
		goto exit;

	delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int) cputime64_sub(now_idle,
						  pcpu->freq_change_time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  pcpu->freq_change_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	if (cpu_load >= go_maxspeed_load) {
		if (pcpu->policy->cur == pcpu->policy->min)
			new_freq = pcpu->policy->max;
		else
			new_freq = pcpu->policy->max * cpu_load / 100;
	} else {
		new_freq = pcpu->policy->cur * cpu_load / 100;
	}

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq == new_freq)
		goto rearm_if_notmax;

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (new_freq < pcpu->target_freq) {
		if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time)
		    < min_sample_time)
			goto rearm;
	}

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling)
				goto exit;

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}

exit:
	return;
}

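/*
 * Called from the idle notifier when this CPU enters idle.  If we're not at
 * the minimum speed, keep the timer running so an idle CPU can't pin other
 * CPUs in its policy above min; if we are at min, cancel any pending timer
 * since nothing needs re-evaluating until the next idle exit.
 */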
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pcpu->idling = 1;
	smp_wmb();
	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer,
				  jiffies + usecs_to_jiffies(timer_rate));
		}
#endif
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}

}

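/*
 * Called from the idle notifier when this CPU leaves idle: start a new load
 * sampling period and arm the timer, unless a previous sample is still being
 * processed by the timer function.
 */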
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	pcpu->idling = 0;
	smp_wmb();

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval.  (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU.  Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
	    pcpu->governor_enabled) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}

}

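/*
 * Realtime kthread that raises CPU speed.  For each CPU flagged in
 * up_cpumask, set the policy to the highest target frequency requested by
 * any CPU sharing that policy, then record the time of the change.
 */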
static int cpufreq_interactive_up_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&up_cpumask_lock, flags);

		if (cpumask_empty(&up_cpumask)) {
			spin_unlock_irqrestore(&up_cpumask_lock, flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&up_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = up_cpumask;
		cpumask_clear(&up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			mutex_lock(&set_speed_lock);

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			mutex_unlock(&set_speed_lock);

			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(cpu,
						     &pcpu->freq_change_time);
		}
	}

	return 0;
}

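/*
 * Workqueue handler that lowers CPU speed.  Mirrors the up path, but runs
 * at normal priority since scaling down is not latency critical.
 */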
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&down_cpumask_lock, flags);
	tmp_mask = down_cpumask;
	cpumask_clear(&down_cpumask);
	spin_unlock_irqrestore(&down_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		unsigned int j;
		unsigned int max_freq = 0;

		pcpu = &per_cpu(cpuinfo, cpu);
		smp_rmb();

		if (!pcpu->governor_enabled)
			continue;

		mutex_lock(&set_speed_lock);

		for_each_cpu(j, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, j);

			if (pjcpu->target_freq > max_freq)
				max_freq = pjcpu->target_freq;
		}

		if (max_freq != pcpu->policy->cur)
			__cpufreq_driver_target(pcpu->policy, max_freq,
						CPUFREQ_RELATION_H);

		mutex_unlock(&set_speed_lock);
		pcpu->freq_change_time_in_idle =
			get_cpu_idle_time_us(cpu,
					     &pcpu->freq_change_time);
	}
}

static ssize_t show_go_maxspeed_load(struct kobject *kobj,
				     struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_maxspeed_load);
}

static ssize_t store_go_maxspeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_maxspeed_load = val;
	return count;
}

static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644,
		show_go_maxspeed_load, store_go_maxspeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

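/*
 * The tunables above are exposed as a sysfs group named "interactive" under
 * the global cpufreq kobject, typically
 * /sys/devices/system/cpu/cpufreq/interactive/, e.g.:
 *
 *	echo 85 > /sys/devices/system/cpu/cpufreq/interactive/go_maxspeed_load
 */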
static struct attribute *interactive_attributes[] = {
	&go_maxspeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

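/*
 * Governor callback: set up per-CPU state and sysfs on GOV_START, tear them
 * down on GOV_STOP, and clamp the current speed into the new bounds on
 * GOV_LIMITS.
 */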
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(j,
						     &pcpu->freq_change_time);
			pcpu->governor_enabled = 1;
			smp_wmb();
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);

			/*
			 * Reset idle exit time since we may cancel the timer
			 * before it can run after the last idle exit time,
			 * to avoid tripping the check in idle exit for a timer
			 * that is trying to run.
			 */
			pcpu->idle_exit_time = 0;
		}

		flush_work(&freq_scale_down_work);
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

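/*
 * Idle notifier: dispatch idle entry/exit events to the handlers above.
 */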
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

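/*
 * Module init: set default tunables, initialize the per-CPU timers, create
 * the realtime up thread and the down workqueue, register the idle notifier,
 * and finally register the governor with cpufreq.
 */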
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
				 "kinteractiveup");
	if (IS_ERR(up_task))
		return PTR_ERR(up_task);

	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
	get_task_struct(up_task);

	/* No rescuer thread, bind to CPU queuing the work for possibly
	   warm cache (probably doesn't matter much). */
	down_wq = alloc_workqueue("knteractive_down", 0, 1);

	if (!down_wq)
		goto err_freeuptask;

	INIT_WORK(&freq_scale_down_work,
		  cpufreq_interactive_freq_down);

	spin_lock_init(&up_cpumask_lock);
	spin_lock_init(&down_cpumask_lock);
	mutex_init(&set_speed_lock);

	idle_notifier_register(&cpufreq_interactive_idle_nb);

	return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
	put_task_struct(up_task);
	return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(up_task);
	put_task_struct(up_task);
	destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");