/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static int active_count;

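/*
 * Per-CPU governor state. The load-accounting fields are updated from the
 * CPU's sample timer and the cpufreq transition notifier under load_lock;
 * enable_sem guards governor_enabled so the timers and the speedchange
 * task never run against a CPU whose governor is being stopped.
 */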
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
	int cpu_load;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Hi speed to bump to from lo speed when load bursts (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
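/*
 * target_loads is kept as a flattened list of target-load/frequency
 * pairs: entry 0 is a load, entry 1 a frequency (a kHz table value) at
 * and above which the next load applies, and so on. For example
 * (illustrative), {90, 1000000, 95} targets 90% load below 1000000 kHz
 * and 95% at or above it.
 */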
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
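/* Flattened delay/frequency pairs, in the same layout as target_loads. */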
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
static spinlock_t above_hispeed_delay_lock;
static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static bool io_is_busy;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
					    cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_is_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

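/*
 * Rearm the CPU-pinned sample timer (plus the optional slack timer) and
 * restart the load-accounting window from now, under load_lock.
 */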
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	mod_timer_pinned(&pcpu->cpu_timer, expires);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay - 1 &&
			freq >= above_hispeed_delay[i+1]; i += 2)
		;

	ret = above_hispeed_delay[i];
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

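/*
 * Each iteration evaluates the candidate frequency's target load, maps
 * the load-adjusted frequency to a new candidate, and narrows the
 * [freqmin, freqmax] bracket until the candidate repeats.
 */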
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

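/*
 * Accumulate busy time weighted by the current frequency into
 * cputime_speedadj; dividing the sum by elapsed wall time later yields
 * the average busy-cycle rate, from which load relative to any target
 * frequency is derived.
 */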
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

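/*
 * Sample timer: compute the load since the last sample, pick a new target
 * speed (hispeed_freq on a load burst or boost, otherwise via
 * choose_freq()), honor above_hispeed_delay and the min_sample_time
 * floor, then hand the CPU off to the speedchange task.
 */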
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	pcpu->cpu_load = cpu_load;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already at max speed with no need to change that; wait until the
	 * next idle to re-evaluate, no timer needed.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed. On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely. This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

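/*
 * Realtime worker that performs the actual frequency transitions: it
 * sleeps until a timer flags CPUs in speedchange_cpumask, then drives
 * each flagged CPU's policy to the highest target_freq among the CPUs
 * sharing that policy.
 */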
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			/* Report normalized utilization once per policy. */
			cpufreq_notify_utilization(pcpu->policy,
				(pcpu->cpu_load * pcpu->policy->cur) /
				pcpu->policy->cpuinfo.max_freq);

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

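/*
 * Pop all online CPUs up to hispeed_freq and make it the floor, so the
 * boost holds until min_sample_time (or the boost pulse) expires.
 */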
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

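/*
 * Parse a string of unsigned ints separated by spaces or colons into a
 * kmalloc'd array; even token counts are rejected so the result always
 * forms a leading value followed by frequency:value pairs (the layout
 * used by the target_loads and above_hispeed_delay tunables).
 */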
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&target_loads_lock, flags);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return count;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
	       show_target_loads, store_target_loads);
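
/*
 * Example (illustrative): writing "85 1000000:90 1700000:99" to the
 * target_loads sysfs file targets 85% load below 1000000 kHz, 90% from
 * 1000000 kHz, and 99% from 1700000 kHz (frequency table values in kHz).
 */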

static ssize_t show_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
	if (above_hispeed_delay != default_above_hispeed_delay)
		kfree(above_hispeed_delay);
	above_hispeed_delay = new_above_hispeed_delay;
	nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return count;
}

static struct global_attr above_hispeed_delay_attr =
	__ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
	       show_above_hispeed_delay, store_above_hispeed_delay);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static ssize_t show_io_is_busy(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", io_is_busy);
}

static ssize_t store_io_is_busy(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	io_is_busy = val;
	return count;
}

static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
		show_io_is_busy, store_io_is_busy);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&above_hispeed_delay_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	&io_is_busy_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

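/*
 * Governor entry point. GOV_START arms the per-CPU timers and, for the
 * first active policy, registers the sysfs group plus the idle and
 * transition notifiers; GOV_STOP tears this down in reverse; GOV_LIMITS
 * clamps the current speed into the new policy bounds.
 */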
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		mutex_lock(&gov_lock);

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			unsigned long expires;

			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			expires = jiffies + usecs_to_jiffies(timer_rate);
			pcpu->cpu_timer.expires = expires;
			add_timer_on(&pcpu->cpu_timer, j);
			if (timer_slack_val >= 0) {
				expires += usecs_to_jiffies(timer_slack_val);
				pcpu->cpu_slack_timer.expires = expires;
				add_timer_on(&pcpu->cpu_slack_timer, j);
			}
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (++active_count > 1) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc) {
			mutex_unlock(&gov_lock);
			return rc;
		}

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (--active_count > 0) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		mutex_unlock(&gov_lock);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	spin_lock_init(&above_hispeed_delay_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");