/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load bursts (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

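/*
 * Re-arm the sampling timer for one timer_rate interval and take a fresh
 * idle-time snapshot.  If timer slack is enabled and the CPU is above its
 * minimum speed, also arm the slack timer so the CPU eventually wakes to
 * re-evaluate (and possibly lower) its speed.
 */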
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);

	mod_timer_pinned(&pcpu->cpu_timer, expires);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_lock(&pcpu->load_lock);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock(&pcpu->load_lock);
}

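/*
 * target_loads is a flat array of alternating values:
 * { load0, freq1, load1, freq2, load2, ... }: load0 applies below freq1,
 * load1 from freq1 up to freq2, and so on, which is why the search below
 * steps two slots at a time.
 */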
static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock(&target_loads_lock);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

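/*
 * Illustrative numbers (assumed, not from any real frequency table): with
 * the CPU 45% active over the last window at 1000 MHz, loadadjfreq is
 * 45 * 1000 = 45000; at a target load of 90 the loop below converges on
 * the lowest table frequency >= 45000 / 90 = 500 MHz.
 */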
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

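/*
 * Accumulate load since the last snapshot: active (non-idle) time is
 * weighted by the current frequency, so dividing cputime_speedadj by wall
 * time later yields a load-adjusted frequency.  Caller must hold
 * pcpu->load_lock.
 */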
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

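/*
 * Per-CPU sampling timer handler.  Converts the accumulated
 * cputime_speedadj into a load-adjusted frequency, picks a new target
 * speed (bumping to hispeed_freq on load bursts or boosts), enforces
 * above_hispeed_delay and the min_sample_time floor, and then wakes the
 * speedchange thread to apply the result.
 */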
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock(&pcpu->load_lock);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock(&pcpu->load_lock);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

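/*
 * Frequency-change thread.  For each CPU flagged in speedchange_cpumask,
 * drive the policy to the highest target_freq among all CPUs sharing that
 * policy.  Runs as a SCHED_FIFO kthread so speed changes are not delayed
 * behind normal work.
 */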
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
							   pcpu->target_freq,
							   pcpu->policy->cur);
		}
	}

	return 0;
}

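/*
 * Raise every online CPU whose target is below hispeed_freq up to
 * hispeed_freq, and reset each CPU's floor so the boosted speed is held
 * for at least min_sample_time once the boost ends.
 */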
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

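/*
 * cpufreq transition notifier.  Snapshot load accounting for every CPU in
 * the affected policy at each completed frequency change, so time in
 * cputime_speedadj is always weighted by the frequency it actually ran at.
 */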
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock(&pjcpu->load_lock);
			update_load(cpu);
			spin_unlock(&pjcpu->load_lock);
		}
	}

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock(&target_loads_lock);
	return ret;
}

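/*
 * Accepts an odd-length list of tokens separated by spaces or colons,
 * alternating target-load and frequency (kHz) values, e.g.:
 *
 *   echo "85 1000000:90" > \
 *       /sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * which targets 85% load below 1 GHz and 90% at or above it.  The example
 * frequency is illustrative; valid values come from the CPU's own table.
 */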
static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock(&target_loads_lock);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock(&target_loads_lock);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

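/*
 * Write-only trigger: writing any value starts a boost pulse, e.g.
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * which holds CPUs at or above hispeed_freq until boostpulse_duration
 * microseconds have elapsed.
 */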
static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			unsigned long expires;

			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			expires = jiffies + usecs_to_jiffies(timer_rate);
			pcpu->cpu_timer.expires = expires;
			add_timer_on(&pcpu->cpu_timer, j);

			if (timer_slack_val >= 0) {
				expires += usecs_to_jiffies(timer_slack_val);
				pcpu->cpu_slack_timer.expires = expires;
				add_timer_on(&pcpu->cpu_slack_timer, j);
			}
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

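/*
 * The per-CPU sampling timer is deferrable, so it does not itself wake an
 * idle CPU; the slack timer is a regular timer whose handler is a nop:
 * its expiry exists only to wake the CPU so the idle-exit path can
 * re-evaluate and drop speed.
 */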
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");