/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static int active_count;

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
	int cpu_load;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Hi speed to bump to from lo speed on a load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);
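
/*
 * target_loads is kept as a flat array of alternating load and
 * frequency values: { load, freq, load, freq, ..., load }.  Even
 * indices are load percentages; each odd index holds the frequency (in
 * kHz) at and above which the following load applies.  The sysfs file
 * accepts the same shape as "load [freq:load] ...", e.g.
 * "85 1500000:90" (the frequency shown is illustrative, not a
 * recommended value).
 */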

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
static spinlock_t above_hispeed_delay_lock;
static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);
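
/*
 * Like target_loads, above_hispeed_delay is a flat { delay, freq,
 * delay, ... } array: even indices are delays in usecs, odd indices
 * are the frequency thresholds at which the next delay takes effect.
 * The sysfs format is likewise "delay [freq:delay] ...".
 */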

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	mod_timer_pinned(&pcpu->cpu_timer, expires);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay - 1 &&
			freq >= above_hispeed_delay[i+1]; i += 2)
		;

	ret = above_hispeed_delay[i];
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

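/*
 * The search below is a bisection: freqmin/freqmax bracket the answer,
 * and each pass re-evaluates the per-frequency target load at the
 * newly chosen speed, stopping once the same frequency is chosen twice
 * in a row.
 */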
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

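/*
 * Accumulate (active time since last sample) * (current frequency)
 * into cputime_speedadj.  Dividing that sum by the wall time elapsed,
 * as the sampling timer does, yields an average frequency weighted by
 * busy time, from which a load percentage is derived.
 */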
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

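/*
 * Per-CPU sampling timer: derive the load-adjusted frequency since the
 * last sample, pick a new target via the hispeed rules and
 * choose_freq(), enforce the frequency floor, then hand any change to
 * the speedchange task.
 */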
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	pcpu->cpu_load = cpu_load;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already at max speed with no need to change; wait until the next
	 * idle to re-evaluate rather than rearming the timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

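/*
 * Idle hooks: on idle entry, make sure a sampling timer is pending if
 * we are above minimum speed (so an idle CPU cannot pin its policy
 * siblings at a high frequency indefinitely); on idle exit, run or
 * rearm the timer so a newly busy CPU is re-evaluated within a tick
 * or two.
 */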
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

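/*
 * The speedchange task is a SCHED_FIFO kthread that drains
 * speedchange_cpumask and, for each flagged CPU, programs its policy
 * to the highest target_freq requested by any CPU sharing that policy
 * (CPUs in one policy share a single clock).
 */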
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			/*
			 * Report utilization once per flagged CPU; the
			 * arguments do not depend on the policy-sibling
			 * loop above.
			 */
			cpufreq_notify_utilization(pcpu->policy,
				(pcpu->cpu_load * pcpu->policy->cur) /
				pcpu->policy->cpuinfo.max_freq);

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

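/*
 * Pop all online CPUs up to hispeed_freq and reset their floor so the
 * boosted speed is held for at least min_sample_time (or until the
 * boost pulse expires).
 */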
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

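/*
 * Parse a space/colon separated list of unsigned ints ("v v:v v:v ...")
 * into a kmalloc'd array.  An even token count is rejected so the list
 * always ends on a value rather than on a frequency boundary.
 */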
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1)) {
		tokenized_data = ERR_PTR(-EINVAL);
		goto err;
	}

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		tokenized_data = ERR_PTR(-ENOMEM);
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1) {
			tokenized_data = ERR_PTR(-EINVAL);
			goto err_kfree;
		}

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens) {
		tokenized_data = ERR_PTR(-EINVAL);
		goto err_kfree;
	}

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return tokenized_data;
}

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&target_loads_lock, flags);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return count;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

static ssize_t show_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
	if (above_hispeed_delay != default_above_hispeed_delay)
		kfree(above_hispeed_delay);
	above_hispeed_delay = new_above_hispeed_delay;
	nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return count;
}

static struct global_attr above_hispeed_delay_attr =
	__ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
		show_above_hispeed_delay, store_above_hispeed_delay);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

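/*
 * The tunables above appear under
 * /sys/devices/system/cpu/cpufreq/interactive while the governor is in
 * use.  Example tuning session (values are illustrative only):
 *
 *	echo 1500000 > hispeed_freq
 *	echo 85 1500000:90 > target_loads
 *	echo 1 > boostpulse
 */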
static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&above_hispeed_delay_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

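/*
 * Governor entry point.  GOV_START arms the per-CPU timers and, on
 * first use, registers the idle and transition notifiers plus the
 * sysfs group; GOV_STOP reverses all of that; GOV_LIMITS clamps the
 * current speed into the new policy bounds.
 */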
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			unsigned long expires;

			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			expires = jiffies + usecs_to_jiffies(timer_rate);
			pcpu->cpu_timer.expires = expires;
			add_timer_on(&pcpu->cpu_timer, j);
			if (timer_slack_val >= 0) {
				expires += usecs_to_jiffies(timer_slack_val);
				pcpu->cpu_slack_timer.expires = expires;
				add_timer_on(&pcpu->cpu_slack_timer, j);
			}
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (++active_count > 1) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc) {
			mutex_unlock(&gov_lock);
			return rc;
		}

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (--active_count > 0) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		mutex_unlock(&gov_lock);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&above_hispeed_delay_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");