| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | *  drivers/cpufreq/cpufreq_conservative.c | 
|  | 3 | * | 
|  | 4 | *  Copyright (C)  2001 Russell King | 
|  | 5 | *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. | 
|  | 6 | *                      Jun Nakajima <jun.nakajima@intel.com> | 
| Alexander Clouter | 11a80a9c76 | 2009-02-13 19:01:01 +0000 | [diff] [blame] | 7 | *            (C)  2009 Alexander Clouter <alex@digriz.org.uk> | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 8 | * | 
|  | 9 | * This program is free software; you can redistribute it and/or modify | 
|  | 10 | * it under the terms of the GNU General Public License version 2 as | 
|  | 11 | * published by the Free Software Foundation. | 
|  | 12 | */ | 
|  | 13 |  | 
|  | 14 | #include <linux/kernel.h> | 
|  | 15 | #include <linux/module.h> | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 16 | #include <linux/init.h> | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 17 | #include <linux/cpufreq.h> | 
| Andrew Morton | 138a0128 | 2006-06-23 03:31:19 -0700 | [diff] [blame] | 18 | #include <linux/cpu.h> | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 19 | #include <linux/jiffies.h> | 
|  | 20 | #include <linux/kernel_stat.h> | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 21 | #include <linux/mutex.h> | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 22 | #include <linux/hrtimer.h> | 
|  | 23 | #include <linux/tick.h> | 
|  | 24 | #include <linux/ktime.h> | 
|  | 25 | #include <linux/sched.h> | 
|  | 26 |  | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 27 | /* | 
|  | 28 | * dbs is used in this file as a shortform for demandbased switching | 
|  | 29 | * It helps to keep variable names smaller, simpler | 
|  | 30 | */ | 
|  | 31 |  | 
|  | 32 | #define DEF_FREQUENCY_UP_THRESHOLD		(80) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 33 | #define DEF_FREQUENCY_DOWN_THRESHOLD		(20) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 34 |  | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 35 | /* | 
|  | 36 | * The polling frequency of this governor depends on the capability of | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 37 | * the processor. Default polling frequency is 1000 times the transition | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 38 | * latency of the processor. The governor will work on any processor with | 
|  | 39 | * transition latency <= 10mS, using appropriate sampling | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 40 | * rate. | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 41 | * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) | 
|  | 42 | * this governor will not work. | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 43 | * All times here are in uS. | 
|  | 44 | */ | 
| Alexander Clouter | 2c906b3 | 2006-03-22 09:54:10 +0000 | [diff] [blame] | 45 | #define MIN_SAMPLING_RATE_RATIO			(2) | 
| Thomas Renninger | 112124a | 2009-02-04 11:55:12 +0100 | [diff] [blame] | 46 |  | 
| Thomas Renninger | cef9615 | 2009-04-22 13:48:29 +0200 | [diff] [blame] | 47 | static unsigned int min_sampling_rate; | 
|  | 48 |  | 
| Thomas Renninger | 112124a | 2009-02-04 11:55:12 +0100 | [diff] [blame] | 49 | #define LATENCY_MULTIPLIER			(1000) | 
| Thomas Renninger | cef9615 | 2009-04-22 13:48:29 +0200 | [diff] [blame] | 50 | #define MIN_LATENCY_MULTIPLIER			(100) | 
| Alexander Clouter | 2c906b3 | 2006-03-22 09:54:10 +0000 | [diff] [blame] | 51 | #define DEF_SAMPLING_DOWN_FACTOR		(1) | 
|  | 52 | #define MAX_SAMPLING_DOWN_FACTOR		(10) | 
| Thomas Renninger | 1c25624 | 2007-10-02 13:28:12 -0700 | [diff] [blame] | 53 | #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 54 |  | 
| David Howells | c402895 | 2006-11-22 14:57:56 +0000 | [diff] [blame] | 55 | static void do_dbs_timer(struct work_struct *work); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 56 |  | 
/* Per-CPU governor state, one instance per CPU (see cs_cpu_dbs_info). */
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;	/* idle time at last sample */
	cputime64_t prev_cpu_wall;	/* wall time at last sample */
	cputime64_t prev_cpu_nice;	/* nice time at last sample (only when ignore_nice) */
	struct cpufreq_policy *cur_policy;	/* policy governing this CPU */
	struct delayed_work work;	/* periodic sampling work (do_dbs_timer) */
	unsigned int down_skip;		/* down-transition throttle counter; reset on freq increase */
	unsigned int requested_freq;	/* frequency the governor internally tracks/requests */
	int cpu;			/* CPU number this state belongs to */
	unsigned int enable:1;		/* governor active on this CPU */
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
| Tejun Heo | 245b2e7 | 2009-06-24 15:13:48 +0900 | [diff] [blame] | 74 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 75 |  | 
|  | 76 | static unsigned int dbs_enable;	/* number of CPUs using this policy */ | 
|  | 77 |  | 
| Venkatesh Pallipadi | 4ec223d | 2006-06-21 15:18:34 -0700 | [diff] [blame] | 78 | /* | 
| venkatesh.pallipadi@intel.com | 7d26e2d | 2009-07-02 17:08:30 -0700 | [diff] [blame] | 79 | * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on | 
| venkatesh.pallipadi@intel.com | ee88415 | 2009-07-02 17:08:33 -0700 | [diff] [blame] | 80 | * different CPUs. It protects dbs_enable in governor start/stop. | 
| Venkatesh Pallipadi | 4ec223d | 2006-06-21 15:18:34 -0700 | [diff] [blame] | 81 | */ | 
| Dave Jones | 9acef48 | 2009-01-18 01:39:51 -0500 | [diff] [blame] | 82 | static DEFINE_MUTEX(dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 83 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 84 | static struct workqueue_struct	*kconservative_wq; | 
|  | 85 |  | 
/*
 * Governor tunables, exposed via sysfs; concurrent modification is
 * serialized by dbs_mutex.  All times are in uS (see header comment).
 */
static struct dbs_tuners {
	unsigned int sampling_rate;		/* sampling interval, uS */
	unsigned int sampling_down_factor;	/* rate multiplier for down-evaluation */
	unsigned int up_threshold;		/* load % above which freq is raised */
	unsigned int down_threshold;		/* load % below which freq is lowered */
	unsigned int ignore_nice;		/* 1: don't count nice time as busy */
	unsigned int freq_step;			/* freq step as % of max; 0 disables changes */
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};
|  | 100 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 101 | static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, | 
|  | 102 | cputime64_t *wall) | 
| Dave Jones | dac1c1a | 2005-05-31 19:03:49 -0700 | [diff] [blame] | 103 | { | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 104 | cputime64_t idle_time; | 
|  | 105 | cputime64_t cur_wall_time; | 
|  | 106 | cputime64_t busy_time; | 
| Gautham R Shenoy | e08f5f5 | 2006-10-26 16:20:58 +0530 | [diff] [blame] | 107 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 108 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | 
|  | 109 | busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, | 
|  | 110 | kstat_cpu(cpu).cpustat.system); | 
| Gautham R Shenoy | e08f5f5 | 2006-10-26 16:20:58 +0530 | [diff] [blame] | 111 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 112 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); | 
|  | 113 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); | 
|  | 114 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); | 
|  | 115 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); | 
| Gautham R Shenoy | e08f5f5 | 2006-10-26 16:20:58 +0530 | [diff] [blame] | 116 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 117 | idle_time = cputime64_sub(cur_wall_time, busy_time); | 
|  | 118 | if (wall) | 
| Pallipadi, Venkatesh | 54c9a35 | 2009-11-11 16:50:29 -0800 | [diff] [blame] | 119 | *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 120 |  | 
| Pallipadi, Venkatesh | 54c9a35 | 2009-11-11 16:50:29 -0800 | [diff] [blame] | 121 | return (cputime64_t)jiffies_to_usecs(idle_time);; | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 122 | } | 
|  | 123 |  | 
|  | 124 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | 
|  | 125 | { | 
|  | 126 | u64 idle_time = get_cpu_idle_time_us(cpu, wall); | 
|  | 127 |  | 
|  | 128 | if (idle_time == -1ULL) | 
|  | 129 | return get_cpu_idle_time_jiffy(cpu, wall); | 
|  | 130 |  | 
|  | 131 | return idle_time; | 
| Dave Jones | dac1c1a | 2005-05-31 19:03:49 -0700 | [diff] [blame] | 132 | } | 
|  | 133 |  | 
| Elias Oltmanns | a8d7c3b | 2007-10-22 09:50:13 +0200 | [diff] [blame] | 134 | /* keep track of frequency transitions */ | 
|  | 135 | static int | 
|  | 136 | dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | 
|  | 137 | void *data) | 
|  | 138 | { | 
|  | 139 | struct cpufreq_freqs *freq = data; | 
| Tejun Heo | 245b2e7 | 2009-06-24 15:13:48 +0900 | [diff] [blame] | 140 | struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, | 
| Elias Oltmanns | a8d7c3b | 2007-10-22 09:50:13 +0200 | [diff] [blame] | 141 | freq->cpu); | 
|  | 142 |  | 
| Alexander Clouter | f407a08 | 2009-02-13 19:01:51 +0000 | [diff] [blame] | 143 | struct cpufreq_policy *policy; | 
|  | 144 |  | 
| Elias Oltmanns | a8d7c3b | 2007-10-22 09:50:13 +0200 | [diff] [blame] | 145 | if (!this_dbs_info->enable) | 
|  | 146 | return 0; | 
|  | 147 |  | 
| Alexander Clouter | f407a08 | 2009-02-13 19:01:51 +0000 | [diff] [blame] | 148 | policy = this_dbs_info->cur_policy; | 
|  | 149 |  | 
|  | 150 | /* | 
|  | 151 | * we only care if our internally tracked freq moves outside | 
|  | 152 | * the 'valid' ranges of freqency available to us otherwise | 
|  | 153 | * we do not change it | 
|  | 154 | */ | 
|  | 155 | if (this_dbs_info->requested_freq > policy->max | 
|  | 156 | || this_dbs_info->requested_freq < policy->min) | 
|  | 157 | this_dbs_info->requested_freq = freq->new; | 
| Elias Oltmanns | a8d7c3b | 2007-10-22 09:50:13 +0200 | [diff] [blame] | 158 |  | 
|  | 159 | return 0; | 
|  | 160 | } | 
|  | 161 |  | 
/* Registered on the cpufreq transition chain; see dbs_cpufreq_notifier(). */
static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};
|  | 165 |  | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 166 | /************************** sysfs interface ************************/ | 
/*
 * Deprecated read-only sysfs file: always reports -1U (no maximum) and
 * logs once which process still reads it.
 */
static ssize_t show_sampling_rate_max(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
		    "sysfs file is deprecated - used by: %s\n", current->comm);
	return sprintf(buf, "%u\n", -1U);
}
|  | 174 |  | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 175 | static ssize_t show_sampling_rate_min(struct kobject *kobj, | 
|  | 176 | struct attribute *attr, char *buf) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 177 | { | 
| Thomas Renninger | cef9615 | 2009-04-22 13:48:29 +0200 | [diff] [blame] | 178 | return sprintf(buf, "%u\n", min_sampling_rate); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 179 | } | 
|  | 180 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 181 | #define define_one_ro(_name)		\ | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 182 | static struct global_attr _name =	\ | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 183 | __ATTR(_name, 0444, show_##_name, NULL) | 
|  | 184 |  | 
|  | 185 | define_one_ro(sampling_rate_max); | 
|  | 186 | define_one_ro(sampling_rate_min); | 
|  | 187 |  | 
|  | 188 | /* cpufreq_conservative Governor Tunables */ | 
|  | 189 | #define show_one(file_name, object)					\ | 
|  | 190 | static ssize_t show_##file_name						\ | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 191 | (struct kobject *kobj, struct attribute *attr, char *buf)		\ | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 192 | {									\ | 
|  | 193 | return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\ | 
|  | 194 | } | 
|  | 195 | show_one(sampling_rate, sampling_rate); | 
|  | 196 | show_one(sampling_down_factor, sampling_down_factor); | 
|  | 197 | show_one(up_threshold, up_threshold); | 
|  | 198 | show_one(down_threshold, down_threshold); | 
| Alexander Clouter | 001893c | 2005-12-01 01:09:25 -0800 | [diff] [blame] | 199 | show_one(ignore_nice_load, ignore_nice); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 200 | show_one(freq_step, freq_step); | 
|  | 201 |  | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 202 | /*** delete after deprecation time ***/ | 
|  | 203 | #define DEPRECATION_MSG(file_name)					\ | 
|  | 204 | printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "	\ | 
|  | 205 | "interface is deprecated - " #file_name "\n"); | 
|  | 206 |  | 
|  | 207 | #define show_one_old(file_name)						\ | 
|  | 208 | static ssize_t show_##file_name##_old					\ | 
|  | 209 | (struct cpufreq_policy *unused, char *buf)				\ | 
|  | 210 | {									\ | 
|  | 211 | printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "	\ | 
|  | 212 | "interface is deprecated - " #file_name "\n");		\ | 
|  | 213 | return show_##file_name(NULL, NULL, buf);			\ | 
|  | 214 | } | 
|  | 215 | show_one_old(sampling_rate); | 
|  | 216 | show_one_old(sampling_down_factor); | 
|  | 217 | show_one_old(up_threshold); | 
|  | 218 | show_one_old(down_threshold); | 
|  | 219 | show_one_old(ignore_nice_load); | 
|  | 220 | show_one_old(freq_step); | 
|  | 221 | show_one_old(sampling_rate_min); | 
|  | 222 | show_one_old(sampling_rate_max); | 
|  | 223 |  | 
|  | 224 | #define define_one_ro_old(object, _name)	\ | 
|  | 225 | static struct freq_attr object =		\ | 
|  | 226 | __ATTR(_name, 0444, show_##_name##_old, NULL) | 
|  | 227 |  | 
|  | 228 | define_one_ro_old(sampling_rate_min_old, sampling_rate_min); | 
|  | 229 | define_one_ro_old(sampling_rate_max_old, sampling_rate_max); | 
|  | 230 |  | 
|  | 231 | /*** delete after deprecation time ***/ | 
|  | 232 |  | 
|  | 233 | static ssize_t store_sampling_down_factor(struct kobject *a, | 
|  | 234 | struct attribute *b, | 
|  | 235 | const char *buf, size_t count) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 236 | { | 
|  | 237 | unsigned int input; | 
|  | 238 | int ret; | 
| Dave Jones | 9acef48 | 2009-01-18 01:39:51 -0500 | [diff] [blame] | 239 | ret = sscanf(buf, "%u", &input); | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 240 |  | 
| Alexander Clouter | 2c906b3 | 2006-03-22 09:54:10 +0000 | [diff] [blame] | 241 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 242 | return -EINVAL; | 
|  | 243 |  | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 244 | mutex_lock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 245 | dbs_tuners_ins.sampling_down_factor = input; | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 246 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 247 |  | 
|  | 248 | return count; | 
|  | 249 | } | 
|  | 250 |  | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 251 | static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, | 
|  | 252 | const char *buf, size_t count) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 253 | { | 
|  | 254 | unsigned int input; | 
|  | 255 | int ret; | 
| Dave Jones | 9acef48 | 2009-01-18 01:39:51 -0500 | [diff] [blame] | 256 | ret = sscanf(buf, "%u", &input); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 257 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 258 | if (ret != 1) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 259 | return -EINVAL; | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 260 |  | 
|  | 261 | mutex_lock(&dbs_mutex); | 
| Thomas Renninger | cef9615 | 2009-04-22 13:48:29 +0200 | [diff] [blame] | 262 | dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 263 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 264 |  | 
|  | 265 | return count; | 
|  | 266 | } | 
|  | 267 |  | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 268 | static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, | 
|  | 269 | const char *buf, size_t count) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 270 | { | 
|  | 271 | unsigned int input; | 
|  | 272 | int ret; | 
| Dave Jones | 9acef48 | 2009-01-18 01:39:51 -0500 | [diff] [blame] | 273 | ret = sscanf(buf, "%u", &input); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 274 |  | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 275 | mutex_lock(&dbs_mutex); | 
| Dave Jones | 9acef48 | 2009-01-18 01:39:51 -0500 | [diff] [blame] | 276 | if (ret != 1 || input > 100 || | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 277 | input <= dbs_tuners_ins.down_threshold) { | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 278 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 279 | return -EINVAL; | 
|  | 280 | } | 
|  | 281 |  | 
|  | 282 | dbs_tuners_ins.up_threshold = input; | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 283 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 284 |  | 
|  | 285 | return count; | 
|  | 286 | } | 
|  | 287 |  | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 288 | static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, | 
|  | 289 | const char *buf, size_t count) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 290 | { | 
|  | 291 | unsigned int input; | 
|  | 292 | int ret; | 
| Dave Jones | 9acef48 | 2009-01-18 01:39:51 -0500 | [diff] [blame] | 293 | ret = sscanf(buf, "%u", &input); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 294 |  | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 295 | mutex_lock(&dbs_mutex); | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 296 | /* cannot be lower than 11 otherwise freq will not fall */ | 
|  | 297 | if (ret != 1 || input < 11 || input > 100 || | 
|  | 298 | input >= dbs_tuners_ins.up_threshold) { | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 299 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 300 | return -EINVAL; | 
|  | 301 | } | 
|  | 302 |  | 
|  | 303 | dbs_tuners_ins.down_threshold = input; | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 304 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 305 |  | 
|  | 306 | return count; | 
|  | 307 | } | 
|  | 308 |  | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 309 | static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | 
|  | 310 | const char *buf, size_t count) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 311 | { | 
|  | 312 | unsigned int input; | 
|  | 313 | int ret; | 
|  | 314 |  | 
|  | 315 | unsigned int j; | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 316 |  | 
|  | 317 | ret = sscanf(buf, "%u", &input); | 
|  | 318 | if (ret != 1) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 319 | return -EINVAL; | 
|  | 320 |  | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 321 | if (input > 1) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 322 | input = 1; | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 323 |  | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 324 | mutex_lock(&dbs_mutex); | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 325 | if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 326 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 327 | return count; | 
|  | 328 | } | 
|  | 329 | dbs_tuners_ins.ignore_nice = input; | 
|  | 330 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 331 | /* we need to re-evaluate prev_cpu_idle */ | 
| Dave Jones | dac1c1a | 2005-05-31 19:03:49 -0700 | [diff] [blame] | 332 | for_each_online_cpu(j) { | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 333 | struct cpu_dbs_info_s *dbs_info; | 
| Tejun Heo | 245b2e7 | 2009-06-24 15:13:48 +0900 | [diff] [blame] | 334 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 335 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 
|  | 336 | &dbs_info->prev_cpu_wall); | 
|  | 337 | if (dbs_tuners_ins.ignore_nice) | 
|  | 338 | dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 339 | } | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 340 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 341 |  | 
|  | 342 | return count; | 
|  | 343 | } | 
|  | 344 |  | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 345 | static ssize_t store_freq_step(struct kobject *a, struct attribute *b, | 
|  | 346 | const char *buf, size_t count) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 347 | { | 
|  | 348 | unsigned int input; | 
|  | 349 | int ret; | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 350 | ret = sscanf(buf, "%u", &input); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 351 |  | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 352 | if (ret != 1) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 353 | return -EINVAL; | 
|  | 354 |  | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 355 | if (input > 100) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 356 | input = 100; | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 357 |  | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 358 | /* no need to test here if freq_step is zero as the user might actually | 
|  | 359 | * want this, they would be crazy though :) */ | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 360 | mutex_lock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 361 | dbs_tuners_ins.freq_step = input; | 
| akpm@osdl.org | 3fc54d3 | 2006-01-13 15:54:22 -0800 | [diff] [blame] | 362 | mutex_unlock(&dbs_mutex); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 363 |  | 
|  | 364 | return count; | 
|  | 365 | } | 
|  | 366 |  | 
|  | 367 | #define define_one_rw(_name) \ | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 368 | static struct global_attr _name = \ | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 369 | __ATTR(_name, 0644, show_##_name, store_##_name) | 
|  | 370 |  | 
|  | 371 | define_one_rw(sampling_rate); | 
|  | 372 | define_one_rw(sampling_down_factor); | 
|  | 373 | define_one_rw(up_threshold); | 
|  | 374 | define_one_rw(down_threshold); | 
| Alexander Clouter | 001893c | 2005-12-01 01:09:25 -0800 | [diff] [blame] | 375 | define_one_rw(ignore_nice_load); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 376 | define_one_rw(freq_step); | 
|  | 377 |  | 
/* Global (per-governor) sysfs attributes, grouped under "conservative". */
static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL	/* terminator */
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};
|  | 394 |  | 
| Thomas Renninger | 49b015c | 2009-10-01 19:49:28 +0200 | [diff] [blame] | 395 | /*** delete after deprecation time ***/ | 
|  | 396 |  | 
|  | 397 | #define write_one_old(file_name)					\ | 
|  | 398 | static ssize_t store_##file_name##_old					\ | 
|  | 399 | (struct cpufreq_policy *unused, const char *buf, size_t count)		\ | 
|  | 400 | {									\ | 
|  | 401 | printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "	\ | 
|  | 402 | "interface is deprecated - " #file_name "\n");	\ | 
|  | 403 | return store_##file_name(NULL, NULL, buf, count);		\ | 
|  | 404 | } | 
|  | 405 | write_one_old(sampling_rate); | 
|  | 406 | write_one_old(sampling_down_factor); | 
|  | 407 | write_one_old(up_threshold); | 
|  | 408 | write_one_old(down_threshold); | 
|  | 409 | write_one_old(ignore_nice_load); | 
|  | 410 | write_one_old(freq_step); | 
|  | 411 |  | 
|  | 412 | #define define_one_rw_old(object, _name)	\ | 
|  | 413 | static struct freq_attr object =		\ | 
|  | 414 | __ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) | 
|  | 415 |  | 
|  | 416 | define_one_rw_old(sampling_rate_old, sampling_rate); | 
|  | 417 | define_one_rw_old(sampling_down_factor_old, sampling_down_factor); | 
|  | 418 | define_one_rw_old(up_threshold_old, up_threshold); | 
|  | 419 | define_one_rw_old(down_threshold_old, down_threshold); | 
|  | 420 | define_one_rw_old(ignore_nice_load_old, ignore_nice_load); | 
|  | 421 | define_one_rw_old(freq_step_old, freq_step); | 
|  | 422 |  | 
/*
 * Deprecated per-policy sysfs attributes (the *_old variants emit a
 * deprecation warning) - scheduled for removal after deprecation time.
 */
static struct attribute *dbs_attributes_old[] = {
	&sampling_rate_max_old.attr,
	&sampling_rate_min_old.attr,
	&sampling_rate_old.attr,
	&sampling_down_factor_old.attr,
	&up_threshold_old.attr,
	&down_threshold_old.attr,
	&ignore_nice_load_old.attr,
	&freq_step_old.attr,
	NULL	/* terminator */
};

static struct attribute_group dbs_attr_group_old = {
	.attrs = dbs_attributes_old,
	.name = "conservative",
};
|  | 439 |  | 
|  | 440 | /*** delete after deprecation time ***/ | 
|  | 441 |  | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 442 | /************************** sysfs end ************************/ | 
|  | 443 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 444 | static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 445 | { | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 446 | unsigned int load = 0; | 
| Dave Jones | f068c04 | 2008-07-30 12:59:56 -0400 | [diff] [blame] | 447 | unsigned int freq_target; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 448 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 449 | struct cpufreq_policy *policy; | 
|  | 450 | unsigned int j; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 451 |  | 
| Alexander Clouter | 08a28e2 | 2006-03-22 09:59:16 +0000 | [diff] [blame] | 452 | policy = this_dbs_info->cur_policy; | 
|  | 453 |  | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 454 | /* | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 455 | * Every sampling_rate, we check, if current idle time is less | 
|  | 456 | * than 20% (default), then we try to increase frequency | 
|  | 457 | * Every sampling_rate*sampling_down_factor, we check, if current | 
|  | 458 | * idle time is more than 80%, then we try to decrease frequency | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 459 | * | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 460 | * Any frequency increase takes it to the maximum frequency. | 
|  | 461 | * Frequency reduction happens at minimum steps of | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 462 | * 5% (default) of maximum frequency | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 463 | */ | 
|  | 464 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 465 | /* Get Absolute Load */ | 
|  | 466 | for_each_cpu(j, policy->cpus) { | 
|  | 467 | struct cpu_dbs_info_s *j_dbs_info; | 
|  | 468 | cputime64_t cur_wall_time, cur_idle_time; | 
|  | 469 | unsigned int idle_time, wall_time; | 
|  | 470 |  | 
| Tejun Heo | 245b2e7 | 2009-06-24 15:13:48 +0900 | [diff] [blame] | 471 | j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 472 |  | 
|  | 473 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); | 
|  | 474 |  | 
|  | 475 | wall_time = (unsigned int) cputime64_sub(cur_wall_time, | 
|  | 476 | j_dbs_info->prev_cpu_wall); | 
|  | 477 | j_dbs_info->prev_cpu_wall = cur_wall_time; | 
|  | 478 |  | 
|  | 479 | idle_time = (unsigned int) cputime64_sub(cur_idle_time, | 
|  | 480 | j_dbs_info->prev_cpu_idle); | 
|  | 481 | j_dbs_info->prev_cpu_idle = cur_idle_time; | 
|  | 482 |  | 
|  | 483 | if (dbs_tuners_ins.ignore_nice) { | 
|  | 484 | cputime64_t cur_nice; | 
|  | 485 | unsigned long cur_nice_jiffies; | 
|  | 486 |  | 
|  | 487 | cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, | 
|  | 488 | j_dbs_info->prev_cpu_nice); | 
|  | 489 | /* | 
|  | 490 | * Assumption: nice time between sampling periods will | 
|  | 491 | * be less than 2^32 jiffies for 32 bit sys | 
|  | 492 | */ | 
|  | 493 | cur_nice_jiffies = (unsigned long) | 
|  | 494 | cputime64_to_jiffies64(cur_nice); | 
|  | 495 |  | 
|  | 496 | j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; | 
|  | 497 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | 
|  | 498 | } | 
|  | 499 |  | 
|  | 500 | if (unlikely(!wall_time || wall_time < idle_time)) | 
|  | 501 | continue; | 
|  | 502 |  | 
|  | 503 | load = 100 * (wall_time - idle_time) / wall_time; | 
|  | 504 | } | 
|  | 505 |  | 
|  | 506 | /* | 
|  | 507 | * break out if we 'cannot' reduce the speed as the user might | 
|  | 508 | * want freq_step to be zero | 
|  | 509 | */ | 
|  | 510 | if (dbs_tuners_ins.freq_step == 0) | 
|  | 511 | return; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 512 |  | 
| Alexander Clouter | 08a28e2 | 2006-03-22 09:59:16 +0000 | [diff] [blame] | 513 | /* Check for frequency increase */ | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 514 | if (load > dbs_tuners_ins.up_threshold) { | 
| Alexander Clouter | a159b82 | 2006-03-22 10:00:18 +0000 | [diff] [blame] | 515 | this_dbs_info->down_skip = 0; | 
| Dave Jones | 790d76f | 2005-05-31 19:03:49 -0700 | [diff] [blame] | 516 |  | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 517 | /* if we are already at full speed then break out early */ | 
| Alexander Clouter | a159b82 | 2006-03-22 10:00:18 +0000 | [diff] [blame] | 518 | if (this_dbs_info->requested_freq == policy->max) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 519 | return; | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 520 |  | 
| Dave Jones | f068c04 | 2008-07-30 12:59:56 -0400 | [diff] [blame] | 521 | freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 522 |  | 
|  | 523 | /* max freq cannot be less than 100. But who knows.... */ | 
| Dave Jones | f068c04 | 2008-07-30 12:59:56 -0400 | [diff] [blame] | 524 | if (unlikely(freq_target == 0)) | 
|  | 525 | freq_target = 5; | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 526 |  | 
| Dave Jones | f068c04 | 2008-07-30 12:59:56 -0400 | [diff] [blame] | 527 | this_dbs_info->requested_freq += freq_target; | 
| Alexander Clouter | a159b82 | 2006-03-22 10:00:18 +0000 | [diff] [blame] | 528 | if (this_dbs_info->requested_freq > policy->max) | 
|  | 529 | this_dbs_info->requested_freq = policy->max; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 530 |  | 
| Alexander Clouter | a159b82 | 2006-03-22 10:00:18 +0000 | [diff] [blame] | 531 | __cpufreq_driver_target(policy, this_dbs_info->requested_freq, | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 532 | CPUFREQ_RELATION_H); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 533 | return; | 
|  | 534 | } | 
|  | 535 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 536 | /* | 
|  | 537 | * The optimal frequency is the frequency that is the lowest that | 
|  | 538 | * can support the current CPU usage without triggering the up | 
|  | 539 | * policy. To be safe, we focus 10 points under the threshold. | 
|  | 540 | */ | 
|  | 541 | if (load < (dbs_tuners_ins.down_threshold - 10)) { | 
| Dave Jones | f068c04 | 2008-07-30 12:59:56 -0400 | [diff] [blame] | 542 | freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 543 |  | 
| Dave Jones | f068c04 | 2008-07-30 12:59:56 -0400 | [diff] [blame] | 544 | this_dbs_info->requested_freq -= freq_target; | 
| Alexander Clouter | a159b82 | 2006-03-22 10:00:18 +0000 | [diff] [blame] | 545 | if (this_dbs_info->requested_freq < policy->min) | 
|  | 546 | this_dbs_info->requested_freq = policy->min; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 547 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 548 | /* | 
|  | 549 | * if we cannot reduce the frequency anymore, break out early | 
|  | 550 | */ | 
|  | 551 | if (policy->cur == policy->min) | 
|  | 552 | return; | 
|  | 553 |  | 
| Alexander Clouter | a159b82 | 2006-03-22 10:00:18 +0000 | [diff] [blame] | 554 | __cpufreq_driver_target(policy, this_dbs_info->requested_freq, | 
| Alexander Clouter | 2c906b3 | 2006-03-22 09:54:10 +0000 | [diff] [blame] | 555 | CPUFREQ_RELATION_H); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 556 | return; | 
|  | 557 | } | 
|  | 558 | } | 
|  | 559 |  | 
| David Howells | c402895 | 2006-11-22 14:57:56 +0000 | [diff] [blame] | 560 | static void do_dbs_timer(struct work_struct *work) | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 561 | { | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 562 | struct cpu_dbs_info_s *dbs_info = | 
|  | 563 | container_of(work, struct cpu_dbs_info_s, work.work); | 
|  | 564 | unsigned int cpu = dbs_info->cpu; | 
|  | 565 |  | 
|  | 566 | /* We want all CPUs to do sampling nearly on same jiffy */ | 
|  | 567 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 
|  | 568 |  | 
|  | 569 | delay -= jiffies % delay; | 
|  | 570 |  | 
| venkatesh.pallipadi@intel.com | ee88415 | 2009-07-02 17:08:33 -0700 | [diff] [blame] | 571 | mutex_lock(&dbs_info->timer_mutex); | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 572 |  | 
|  | 573 | dbs_check_cpu(dbs_info); | 
|  | 574 |  | 
|  | 575 | queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); | 
| venkatesh.pallipadi@intel.com | ee88415 | 2009-07-02 17:08:33 -0700 | [diff] [blame] | 576 | mutex_unlock(&dbs_info->timer_mutex); | 
| Dave Jones | 18a7247 | 2007-10-22 16:49:09 -0400 | [diff] [blame] | 577 | } | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 578 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 579 | static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 580 | { | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 581 | /* We want all CPUs to do sampling nearly on same jiffy */ | 
|  | 582 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 
|  | 583 | delay -= jiffies % delay; | 
|  | 584 |  | 
|  | 585 | dbs_info->enable = 1; | 
|  | 586 | INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); | 
|  | 587 | queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, | 
|  | 588 | delay); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 589 | } | 
|  | 590 |  | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 591 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 592 | { | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 593 | dbs_info->enable = 0; | 
| Mathieu Desnoyers | b253d2b | 2009-05-17 10:29:33 -0400 | [diff] [blame] | 594 | cancel_delayed_work_sync(&dbs_info->work); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 595 | } | 
|  | 596 |  | 
/*
 * Governor callback invoked by the cpufreq core for lifecycle events.
 *
 * CPUFREQ_GOV_START:  attach the governor to @policy, snapshot per-CPU
 *                     idle/wall baselines, create sysfs groups and (for
 *                     the first user) set up global tunables, then start
 *                     the sampling timer.
 * CPUFREQ_GOV_STOP:   stop the timer and undo the above; the last user
 *                     also unregisters the transition notifier and the
 *                     global sysfs group.
 * CPUFREQ_GOV_LIMITS: clamp the current frequency into the (possibly
 *                     changed) [policy->min, policy->max] range.
 *
 * Returns 0 on success or a negative errno.
 */
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		/* per-policy (legacy) sysfs attributes */
		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		/*
		 * Baseline the idle/wall counters for every CPU governed by
		 * this policy so the first sample computes a sane delta.
		 */
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->down_skip = 0;
		/* start stepping from wherever the hardware currently is */
		this_dbs_info->requested_freq = policy->cur;

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_enable++;
		/*
		 * One-time global setup when this governor gains its
		 * first user.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/*
			 * conservative does not implement micro like ondemand
			 * governor, thus we are bound to jiffies/HZ
			 */
			min_sampling_rate =
				MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);

			/* track externally-initiated frequency changes */
			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}
		/* timer is started outside dbs_mutex */
		mutex_unlock(&dbs_mutex);

		dbs_timer_init(this_dbs_info);

		break;

	case CPUFREQ_GOV_STOP:
		/* stop sampling before tearing down state it uses */
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
		dbs_enable--;
		mutex_destroy(&this_dbs_info->timer_mutex);

		/*
		 * Global teardown when the last user of this governor
		 * goes away.
		 */
		if (dbs_enable == 0)
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		/* serialize against the sampling worker's target calls */
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);

		break;
	}
	return 0;
}
|  | 718 |  | 
| Sven Wegener | c4d14bc | 2008-09-20 16:50:08 +0200 | [diff] [blame] | 719 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE | 
|  | 720 | static | 
|  | 721 | #endif | 
| Thomas Renninger | 1c25624 | 2007-10-02 13:28:12 -0700 | [diff] [blame] | 722 | struct cpufreq_governor cpufreq_gov_conservative = { | 
|  | 723 | .name			= "conservative", | 
|  | 724 | .governor		= cpufreq_governor_dbs, | 
|  | 725 | .max_transition_latency	= TRANSITION_LATENCY_LIMIT, | 
|  | 726 | .owner			= THIS_MODULE, | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 727 | }; | 
|  | 728 |  | 
|  | 729 | static int __init cpufreq_gov_dbs_init(void) | 
|  | 730 | { | 
| Alexander Clouter | 8e677ce | 2009-02-13 19:02:34 +0000 | [diff] [blame] | 731 | int err; | 
|  | 732 |  | 
|  | 733 | kconservative_wq = create_workqueue("kconservative"); | 
|  | 734 | if (!kconservative_wq) { | 
|  | 735 | printk(KERN_ERR "Creation of kconservative failed\n"); | 
|  | 736 | return -EFAULT; | 
|  | 737 | } | 
|  | 738 |  | 
|  | 739 | err = cpufreq_register_governor(&cpufreq_gov_conservative); | 
|  | 740 | if (err) | 
|  | 741 | destroy_workqueue(kconservative_wq); | 
|  | 742 |  | 
|  | 743 | return err; | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 744 | } | 
|  | 745 |  | 
/*
 * Module exit: unregister the governor first so no policy can start it
 * and queue new sampling work, then destroy the (now idle) workqueue.
 */
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
	destroy_workqueue(kconservative_wq);
}
|  | 751 |  | 
|  | 752 |  | 
| Alexander Clouter | 11a80a9c76 | 2009-02-13 19:01:01 +0000 | [diff] [blame] | 753 | MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); | 
| Dave Jones | 9acef48 | 2009-01-18 01:39:51 -0500 | [diff] [blame] | 754 | MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 755 | "Low Latency Frequency Transition capable processors " | 
|  | 756 | "optimised for use in a battery environment"); | 
| Dave Jones | 9acef48 | 2009-01-18 01:39:51 -0500 | [diff] [blame] | 757 | MODULE_LICENSE("GPL"); | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 758 |  | 
| Johannes Weiner | 6915719 | 2008-01-17 15:21:08 -0800 | [diff] [blame] | 759 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE | 
|  | 760 | fs_initcall(cpufreq_gov_dbs_init); | 
|  | 761 | #else | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 762 | module_init(cpufreq_gov_dbs_init); | 
| Johannes Weiner | 6915719 | 2008-01-17 15:21:08 -0800 | [diff] [blame] | 763 | #endif | 
| Dave Jones | b917083 | 2005-05-31 19:03:47 -0700 | [diff] [blame] | 764 | module_exit(cpufreq_gov_dbs_exit); |