| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1 | /* | 
 | 2 |  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH) | 
 | 3 |  * | 
 | 4 |  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 
 | 5 |  * | 
 | 6 |  *  Interactivity improvements by Mike Galbraith | 
 | 7 |  *  (C) 2007 Mike Galbraith <efault@gmx.de> | 
 | 8 |  * | 
 | 9 |  *  Various enhancements by Dmitry Adamushko. | 
 | 10 |  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com> | 
 | 11 |  * | 
 | 12 |  *  Group scheduling enhancements by Srivatsa Vaddagiri | 
 | 13 |  *  Copyright IBM Corporation, 2007 | 
 | 14 |  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> | 
 | 15 |  * | 
 | 16 |  *  Scaled math optimizations by Thomas Gleixner | 
 | 17 |  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de> | 
| Peter Zijlstra | 2180508 | 2007-08-25 18:41:53 +0200 | [diff] [blame] | 18 |  * | 
 | 19 |  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra | 
 | 20 |  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 21 |  */ | 
 | 22 |  | 
| Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 23 | #include <linux/latencytop.h> | 
| Christian Ehrhardt | 1983a92 | 2009-11-30 12:16:47 +0100 | [diff] [blame] | 24 | #include <linux/sched.h> | 
| Sisir Koppaka | 3436ae1 | 2011-03-26 18:22:55 +0530 | [diff] [blame] | 25 | #include <linux/cpumask.h> | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 26 | #include <linux/slab.h> | 
 | 27 | #include <linux/profile.h> | 
 | 28 | #include <linux/interrupt.h> | 
 | 29 |  | 
 | 30 | #include <trace/events/sched.h> | 
 | 31 |  | 
 | 32 | #include "sched.h" | 
| Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 33 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 34 | /* | 
| Peter Zijlstra | 2180508 | 2007-08-25 18:41:53 +0200 | [diff] [blame] | 35 |  * Targeted preemption latency for CPU-bound tasks: | 
| Takuya Yoshikawa | 864616e | 2010-10-14 16:09:13 +0900 | [diff] [blame] | 36 |  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 37 |  * | 
| Peter Zijlstra | 2180508 | 2007-08-25 18:41:53 +0200 | [diff] [blame] | 38 |  * NOTE: this latency value is not the same as the concept of | 
| Ingo Molnar | d274a4c | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 39 |  * 'timeslice length' - timeslices in CFS are of variable length | 
 | 40 |  * and have no persistent notion like in traditional, time-slice | 
 | 41 |  * based scheduling concepts. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 42 |  * | 
| Ingo Molnar | d274a4c | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 43 |  * (to see the precise effective timeslice length of your workload, | 
 | 44 |  *  run vmstat and monitor the context-switches (cs) field) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 45 |  */ | 
| Mike Galbraith | 2140692 | 2010-03-11 17:17:15 +0100 | [diff] [blame] | 46 | unsigned int sysctl_sched_latency = 6000000ULL; | 
 | 47 | unsigned int normalized_sysctl_sched_latency = 6000000ULL; | 
| Ingo Molnar | 2bd8e6d | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 48 |  | 
 | 49 | /* | 
| Christian Ehrhardt | 1983a92 | 2009-11-30 12:16:47 +0100 | [diff] [blame] | 50 |  * The initial- and re-scaling of tunables is configurable | 
 | 51 |  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) | 
 | 52 |  * | 
 | 53 |  * Options are: | 
 | 54 |  * SCHED_TUNABLESCALING_NONE - unscaled, always *1 | 
 | 55 |  * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) | 
 | 56 |  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus | 
 | 57 |  */ | 
 | 58 | enum sched_tunable_scaling sysctl_sched_tunable_scaling | 
 | 59 | 	= SCHED_TUNABLESCALING_LOG; | 
 | 60 |  | 
 | 61 | /* | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 62 |  * Minimal preemption granularity for CPU-bound tasks: | 
| Takuya Yoshikawa | 864616e | 2010-10-14 16:09:13 +0900 | [diff] [blame] | 63 |  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 64 |  */ | 
| Ingo Molnar | 0bf377b | 2010-09-12 08:14:52 +0200 | [diff] [blame] | 65 | unsigned int sysctl_sched_min_granularity = 750000ULL; | 
 | 66 | unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 67 |  | 
 | 68 | /* | 
 | 69 |  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity | 
 | 70 |  */ | 
| Ingo Molnar | 0bf377b | 2010-09-12 08:14:52 +0200 | [diff] [blame] | 71 | static unsigned int sched_nr_latency = 8; | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 72 |  | 
 | 73 | /* | 
| Mike Galbraith | 2bba22c | 2009-09-09 15:41:37 +0200 | [diff] [blame] | 74 |  * After fork, child runs first. If set to 0 (default) then | 
| Ingo Molnar | 2bd8e6d | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 75 |  * parent will (try to) run first. | 
 | 76 |  */ | 
| Mike Galbraith | 2bba22c | 2009-09-09 15:41:37 +0200 | [diff] [blame] | 77 | unsigned int sysctl_sched_child_runs_first __read_mostly; | 
| Peter Zijlstra | 2180508 | 2007-08-25 18:41:53 +0200 | [diff] [blame] | 78 |  | 
 | 79 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 80 |  * SCHED_OTHER wake-up granularity. | 
| Mike Galbraith | 172e082 | 2009-09-09 15:41:37 +0200 | [diff] [blame] | 81 |  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 82 |  * | 
 | 83 |  * This option delays the preemption effects of decoupled workloads | 
 | 84 |  * and reduces their over-scheduling. Synchronous workloads will still | 
 | 85 |  * have immediate wakeup/sleep latencies. | 
 | 86 |  */ | 
| Mike Galbraith | 172e082 | 2009-09-09 15:41:37 +0200 | [diff] [blame] | 87 | unsigned int sysctl_sched_wakeup_granularity = 1000000UL; | 
| Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 88 | unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 89 |  | 
| Ingo Molnar | da84d96 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 90 | const_debug unsigned int sysctl_sched_migration_cost = 500000UL; | 
 | 91 |  | 
| Paul Turner | a7a4f8a | 2010-11-15 15:47:06 -0800 | [diff] [blame] | 92 | /* | 
 | 93 |  * The exponential sliding  window over which load is averaged for shares | 
 | 94 |  * distribution. | 
 | 95 |  * (default: 10msec) | 
 | 96 |  */ | 
 | 97 | unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL; | 
 | 98 |  | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 99 | #ifdef CONFIG_CFS_BANDWIDTH | 
 | 100 | /* | 
 | 101 |  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool | 
 | 102 |  * each time a cfs_rq requests quota. | 
 | 103 |  * | 
 | 104 |  * Note: in the case that the slice exceeds the runtime remaining (either due | 
 | 105 |  * to consumption or the quota being specified to be smaller than the slice) | 
 | 106 |  * we will always only issue the remaining available time. | 
 | 107 |  * | 
 | 108 |  * default: 5 msec, units: microseconds | 
 | 109 |   */ | 
 | 110 | unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; | 
 | 111 | #endif | 
 | 112 |  | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 113 | /* | 
 | 114 |  * Increase the granularity value when there are more CPUs, | 
 | 115 |  * because with more CPUs the 'effective latency' as visible | 
 | 116 |  * to users decreases. But the relationship is not linear, | 
 | 117 |  * so pick a second-best guess by going with the log2 of the | 
 | 118 |  * number of CPUs. | 
 | 119 |  * | 
 | 120 |  * This idea comes from the SD scheduler of Con Kolivas: | 
 | 121 |  */ | 
 | 122 | static int get_update_sysctl_factor(void) | 
 | 123 | { | 
 | 124 | 	unsigned int cpus = min_t(int, num_online_cpus(), 8); | 
 | 125 | 	unsigned int factor; | 
 | 126 |  | 
 | 127 | 	switch (sysctl_sched_tunable_scaling) { | 
 | 128 | 	case SCHED_TUNABLESCALING_NONE: | 
 | 129 | 		factor = 1; | 
 | 130 | 		break; | 
 | 131 | 	case SCHED_TUNABLESCALING_LINEAR: | 
 | 132 | 		factor = cpus; | 
 | 133 | 		break; | 
 | 134 | 	case SCHED_TUNABLESCALING_LOG: | 
 | 135 | 	default: | 
 | 136 | 		factor = 1 + ilog2(cpus); | 
 | 137 | 		break; | 
 | 138 | 	} | 
 | 139 |  | 
 | 140 | 	return factor; | 
 | 141 | } | 
 | 142 |  | 
 | 143 | static void update_sysctl(void) | 
 | 144 | { | 
 | 145 | 	unsigned int factor = get_update_sysctl_factor(); | 
 | 146 |  | 
 | 147 | #define SET_SYSCTL(name) \ | 
 | 148 | 	(sysctl_##name = (factor) * normalized_sysctl_##name) | 
 | 149 | 	SET_SYSCTL(sched_min_granularity); | 
 | 150 | 	SET_SYSCTL(sched_latency); | 
 | 151 | 	SET_SYSCTL(sched_wakeup_granularity); | 
 | 152 | #undef SET_SYSCTL | 
 | 153 | } | 
 | 154 |  | 
 | 155 | void sched_init_granularity(void) | 
 | 156 | { | 
 | 157 | 	update_sysctl(); | 
 | 158 | } | 
 | 159 |  | 
 | 160 | #if BITS_PER_LONG == 32 | 
 | 161 | # define WMULT_CONST	(~0UL) | 
 | 162 | #else | 
 | 163 | # define WMULT_CONST	(1UL << 32) | 
 | 164 | #endif | 
 | 165 |  | 
 | 166 | #define WMULT_SHIFT	32 | 
 | 167 |  | 
 | 168 | /* | 
 | 169 |  * Shift right and round: | 
 | 170 |  */ | 
 | 171 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) | 
 | 172 |  | 
 | 173 | /* | 
 | 174 |  * delta *= weight / lw | 
 | 175 |  */ | 
 | 176 | static unsigned long | 
 | 177 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, | 
 | 178 | 		struct load_weight *lw) | 
 | 179 | { | 
 | 180 | 	u64 tmp; | 
 | 181 |  | 
 | 182 | 	/* | 
 | 183 | 	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched | 
 | 184 | 	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than | 
 | 185 | 	 * 2^SCHED_LOAD_RESOLUTION. | 
 | 186 | 	 */ | 
 | 187 | 	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) | 
 | 188 | 		tmp = (u64)delta_exec * scale_load_down(weight); | 
 | 189 | 	else | 
 | 190 | 		tmp = (u64)delta_exec; | 
 | 191 |  | 
 | 192 | 	if (!lw->inv_weight) { | 
 | 193 | 		unsigned long w = scale_load_down(lw->weight); | 
 | 194 |  | 
 | 195 | 		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) | 
 | 196 | 			lw->inv_weight = 1; | 
 | 197 | 		else if (unlikely(!w)) | 
 | 198 | 			lw->inv_weight = WMULT_CONST; | 
 | 199 | 		else | 
 | 200 | 			lw->inv_weight = WMULT_CONST / w; | 
 | 201 | 	} | 
 | 202 |  | 
 | 203 | 	/* | 
 | 204 | 	 * Check whether we'd overflow the 64-bit multiplication: | 
 | 205 | 	 */ | 
 | 206 | 	if (unlikely(tmp > WMULT_CONST)) | 
 | 207 | 		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, | 
 | 208 | 			WMULT_SHIFT/2); | 
 | 209 | 	else | 
 | 210 | 		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); | 
 | 211 |  | 
 | 212 | 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); | 
 | 213 | } | 
 | 214 |  | 
 | 215 |  | 
 | 216 | const struct sched_class fair_sched_class; | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 217 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 218 | /************************************************************** | 
 | 219 |  * CFS operations on generic schedulable entities: | 
 | 220 |  */ | 
 | 221 |  | 
 | 222 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 223 |  | 
 | 224 | /* cpu runqueue to which this cfs_rq is attached */ | 
 | 225 | static inline struct rq *rq_of(struct cfs_rq *cfs_rq) | 
 | 226 | { | 
 | 227 | 	return cfs_rq->rq; | 
 | 228 | } | 
 | 229 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 230 | /* An entity is a task if it doesn't "own" a runqueue */ | 
 | 231 | #define entity_is_task(se)	(!se->my_q) | 
 | 232 |  | 
| Peter Zijlstra | 8f48894 | 2009-07-24 12:25:30 +0200 | [diff] [blame] | 233 | static inline struct task_struct *task_of(struct sched_entity *se) | 
 | 234 | { | 
 | 235 | #ifdef CONFIG_SCHED_DEBUG | 
 | 236 | 	WARN_ON_ONCE(!entity_is_task(se)); | 
 | 237 | #endif | 
 | 238 | 	return container_of(se, struct task_struct, se); | 
 | 239 | } | 
 | 240 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 241 | /* Walk up scheduling entities hierarchy */ | 
 | 242 | #define for_each_sched_entity(se) \ | 
 | 243 | 		for (; se; se = se->parent) | 
 | 244 |  | 
 | 245 | static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) | 
 | 246 | { | 
 | 247 | 	return p->se.cfs_rq; | 
 | 248 | } | 
 | 249 |  | 
 | 250 | /* runqueue on which this entity is (to be) queued */ | 
 | 251 | static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) | 
 | 252 | { | 
 | 253 | 	return se->cfs_rq; | 
 | 254 | } | 
 | 255 |  | 
 | 256 | /* runqueue "owned" by this group */ | 
 | 257 | static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) | 
 | 258 | { | 
 | 259 | 	return grp->my_q; | 
 | 260 | } | 
 | 261 |  | 
| Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 262 | static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) | 
 | 263 | { | 
 | 264 | 	if (!cfs_rq->on_list) { | 
| Paul Turner | 67e8625 | 2010-11-15 15:47:05 -0800 | [diff] [blame] | 265 | 		/* | 
 | 266 | 		 * Ensure we either appear before our parent (if already | 
 | 267 | 		 * enqueued) or force our parent to appear after us when it is | 
 | 268 | 		 * enqueued.  The fact that we always enqueue bottom-up | 
 | 269 | 		 * reduces this to two cases. | 
 | 270 | 		 */ | 
 | 271 | 		if (cfs_rq->tg->parent && | 
 | 272 | 		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) { | 
 | 273 | 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list, | 
| Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 274 | 				&rq_of(cfs_rq)->leaf_cfs_rq_list); | 
| Paul Turner | 67e8625 | 2010-11-15 15:47:05 -0800 | [diff] [blame] | 275 | 		} else { | 
 | 276 | 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, | 
 | 277 | 				&rq_of(cfs_rq)->leaf_cfs_rq_list); | 
 | 278 | 		} | 
| Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 279 |  | 
 | 280 | 		cfs_rq->on_list = 1; | 
 | 281 | 	} | 
 | 282 | } | 
 | 283 |  | 
 | 284 | static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) | 
 | 285 | { | 
 | 286 | 	if (cfs_rq->on_list) { | 
 | 287 | 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list); | 
 | 288 | 		cfs_rq->on_list = 0; | 
 | 289 | 	} | 
 | 290 | } | 
 | 291 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 292 | /* Iterate thr' all leaf cfs_rq's on a runqueue */ | 
 | 293 | #define for_each_leaf_cfs_rq(rq, cfs_rq) \ | 
 | 294 | 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) | 
 | 295 |  | 
 | 296 | /* Do the two (enqueued) entities belong to the same group ? */ | 
 | 297 | static inline int | 
 | 298 | is_same_group(struct sched_entity *se, struct sched_entity *pse) | 
 | 299 | { | 
 | 300 | 	if (se->cfs_rq == pse->cfs_rq) | 
 | 301 | 		return 1; | 
 | 302 |  | 
 | 303 | 	return 0; | 
 | 304 | } | 
 | 305 |  | 
 | 306 | static inline struct sched_entity *parent_entity(struct sched_entity *se) | 
 | 307 | { | 
 | 308 | 	return se->parent; | 
 | 309 | } | 
 | 310 |  | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 311 | /* return depth at which a sched entity is present in the hierarchy */ | 
 | 312 | static inline int depth_se(struct sched_entity *se) | 
 | 313 | { | 
 | 314 | 	int depth = 0; | 
 | 315 |  | 
 | 316 | 	for_each_sched_entity(se) | 
 | 317 | 		depth++; | 
 | 318 |  | 
 | 319 | 	return depth; | 
 | 320 | } | 
 | 321 |  | 
 | 322 | static void | 
 | 323 | find_matching_se(struct sched_entity **se, struct sched_entity **pse) | 
 | 324 | { | 
 | 325 | 	int se_depth, pse_depth; | 
 | 326 |  | 
 | 327 | 	/* | 
 | 328 | 	 * preemption test can be made between sibling entities who are in the | 
 | 329 | 	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of | 
 | 330 | 	 * both tasks until we find their ancestors who are siblings of common | 
 | 331 | 	 * parent. | 
 | 332 | 	 */ | 
 | 333 |  | 
 | 334 | 	/* First walk up until both entities are at same depth */ | 
 | 335 | 	se_depth = depth_se(*se); | 
 | 336 | 	pse_depth = depth_se(*pse); | 
 | 337 |  | 
 | 338 | 	while (se_depth > pse_depth) { | 
 | 339 | 		se_depth--; | 
 | 340 | 		*se = parent_entity(*se); | 
 | 341 | 	} | 
 | 342 |  | 
 | 343 | 	while (pse_depth > se_depth) { | 
 | 344 | 		pse_depth--; | 
 | 345 | 		*pse = parent_entity(*pse); | 
 | 346 | 	} | 
 | 347 |  | 
 | 348 | 	while (!is_same_group(*se, *pse)) { | 
 | 349 | 		*se = parent_entity(*se); | 
 | 350 | 		*pse = parent_entity(*pse); | 
 | 351 | 	} | 
 | 352 | } | 
 | 353 |  | 
| Peter Zijlstra | 8f48894 | 2009-07-24 12:25:30 +0200 | [diff] [blame] | 354 | #else	/* !CONFIG_FAIR_GROUP_SCHED */ | 
 | 355 |  | 
 | 356 | static inline struct task_struct *task_of(struct sched_entity *se) | 
 | 357 | { | 
 | 358 | 	return container_of(se, struct task_struct, se); | 
 | 359 | } | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 360 |  | 
 | 361 | static inline struct rq *rq_of(struct cfs_rq *cfs_rq) | 
 | 362 | { | 
 | 363 | 	return container_of(cfs_rq, struct rq, cfs); | 
 | 364 | } | 
 | 365 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 366 | #define entity_is_task(se)	1 | 
 | 367 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 368 | #define for_each_sched_entity(se) \ | 
 | 369 | 		for (; se; se = NULL) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 370 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 371 | static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 372 | { | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 373 | 	return &task_rq(p)->cfs; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 374 | } | 
 | 375 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 376 | static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) | 
 | 377 | { | 
 | 378 | 	struct task_struct *p = task_of(se); | 
 | 379 | 	struct rq *rq = task_rq(p); | 
 | 380 |  | 
 | 381 | 	return &rq->cfs; | 
 | 382 | } | 
 | 383 |  | 
 | 384 | /* runqueue "owned" by this group */ | 
 | 385 | static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) | 
 | 386 | { | 
 | 387 | 	return NULL; | 
 | 388 | } | 
 | 389 |  | 
| Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 390 | static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) | 
 | 391 | { | 
 | 392 | } | 
 | 393 |  | 
 | 394 | static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) | 
 | 395 | { | 
 | 396 | } | 
 | 397 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 398 | #define for_each_leaf_cfs_rq(rq, cfs_rq) \ | 
 | 399 | 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) | 
 | 400 |  | 
 | 401 | static inline int | 
 | 402 | is_same_group(struct sched_entity *se, struct sched_entity *pse) | 
 | 403 | { | 
 | 404 | 	return 1; | 
 | 405 | } | 
 | 406 |  | 
 | 407 | static inline struct sched_entity *parent_entity(struct sched_entity *se) | 
 | 408 | { | 
 | 409 | 	return NULL; | 
 | 410 | } | 
 | 411 |  | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 412 | static inline void | 
 | 413 | find_matching_se(struct sched_entity **se, struct sched_entity **pse) | 
 | 414 | { | 
 | 415 | } | 
 | 416 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 417 | #endif	/* CONFIG_FAIR_GROUP_SCHED */ | 
 | 418 |  | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 419 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | 
 | 420 | 				   unsigned long delta_exec); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 421 |  | 
 | 422 | /************************************************************** | 
 | 423 |  * Scheduling class tree data structure manipulation methods: | 
 | 424 |  */ | 
 | 425 |  | 
| Ingo Molnar | 0702e3e | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 426 | static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime) | 
| Peter Zijlstra | 02e0431 | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 427 | { | 
| Peter Zijlstra | 368059a | 2007-10-15 17:00:11 +0200 | [diff] [blame] | 428 | 	s64 delta = (s64)(vruntime - min_vruntime); | 
 | 429 | 	if (delta > 0) | 
| Peter Zijlstra | 02e0431 | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 430 | 		min_vruntime = vruntime; | 
 | 431 |  | 
 | 432 | 	return min_vruntime; | 
 | 433 | } | 
 | 434 |  | 
| Ingo Molnar | 0702e3e | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 435 | static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) | 
| Peter Zijlstra | b0ffd24 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 436 | { | 
 | 437 | 	s64 delta = (s64)(vruntime - min_vruntime); | 
 | 438 | 	if (delta < 0) | 
 | 439 | 		min_vruntime = vruntime; | 
 | 440 |  | 
 | 441 | 	return min_vruntime; | 
 | 442 | } | 
 | 443 |  | 
| Fabio Checconi | 54fdc58 | 2009-07-16 12:32:27 +0200 | [diff] [blame] | 444 | static inline int entity_before(struct sched_entity *a, | 
 | 445 | 				struct sched_entity *b) | 
 | 446 | { | 
 | 447 | 	return (s64)(a->vruntime - b->vruntime) < 0; | 
 | 448 | } | 
 | 449 |  | 
| Peter Zijlstra | 1af5f73 | 2008-10-24 11:06:13 +0200 | [diff] [blame] | 450 | static void update_min_vruntime(struct cfs_rq *cfs_rq) | 
 | 451 | { | 
 | 452 | 	u64 vruntime = cfs_rq->min_vruntime; | 
 | 453 |  | 
 | 454 | 	if (cfs_rq->curr) | 
 | 455 | 		vruntime = cfs_rq->curr->vruntime; | 
 | 456 |  | 
 | 457 | 	if (cfs_rq->rb_leftmost) { | 
 | 458 | 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, | 
 | 459 | 						   struct sched_entity, | 
 | 460 | 						   run_node); | 
 | 461 |  | 
| Peter Zijlstra | e17036d | 2009-01-15 14:53:39 +0100 | [diff] [blame] | 462 | 		if (!cfs_rq->curr) | 
| Peter Zijlstra | 1af5f73 | 2008-10-24 11:06:13 +0200 | [diff] [blame] | 463 | 			vruntime = se->vruntime; | 
 | 464 | 		else | 
 | 465 | 			vruntime = min_vruntime(vruntime, se->vruntime); | 
 | 466 | 	} | 
 | 467 |  | 
 | 468 | 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); | 
| Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 469 | #ifndef CONFIG_64BIT | 
 | 470 | 	smp_wmb(); | 
 | 471 | 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; | 
 | 472 | #endif | 
| Peter Zijlstra | 1af5f73 | 2008-10-24 11:06:13 +0200 | [diff] [blame] | 473 | } | 
 | 474 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 475 | /* | 
 | 476 |  * Enqueue an entity into the rb-tree: | 
 | 477 |  */ | 
| Ingo Molnar | 0702e3e | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 478 | static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 479 | { | 
 | 480 | 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; | 
 | 481 | 	struct rb_node *parent = NULL; | 
 | 482 | 	struct sched_entity *entry; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 483 | 	int leftmost = 1; | 
 | 484 |  | 
 | 485 | 	/* | 
 | 486 | 	 * Find the right place in the rbtree: | 
 | 487 | 	 */ | 
 | 488 | 	while (*link) { | 
 | 489 | 		parent = *link; | 
 | 490 | 		entry = rb_entry(parent, struct sched_entity, run_node); | 
 | 491 | 		/* | 
 | 492 | 		 * We dont care about collisions. Nodes with | 
 | 493 | 		 * the same key stay together. | 
 | 494 | 		 */ | 
| Stephan Baerwolf | 2bd2d6f | 2011-07-20 14:46:59 +0200 | [diff] [blame] | 495 | 		if (entity_before(se, entry)) { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 496 | 			link = &parent->rb_left; | 
 | 497 | 		} else { | 
 | 498 | 			link = &parent->rb_right; | 
 | 499 | 			leftmost = 0; | 
 | 500 | 		} | 
 | 501 | 	} | 
 | 502 |  | 
 | 503 | 	/* | 
 | 504 | 	 * Maintain a cache of leftmost tree entries (it is frequently | 
 | 505 | 	 * used): | 
 | 506 | 	 */ | 
| Peter Zijlstra | 1af5f73 | 2008-10-24 11:06:13 +0200 | [diff] [blame] | 507 | 	if (leftmost) | 
| Ingo Molnar | 57cb499 | 2007-10-15 17:00:11 +0200 | [diff] [blame] | 508 | 		cfs_rq->rb_leftmost = &se->run_node; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 509 |  | 
 | 510 | 	rb_link_node(&se->run_node, parent, link); | 
 | 511 | 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 512 | } | 
 | 513 |  | 
| Ingo Molnar | 0702e3e | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 514 | static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 515 | { | 
| Peter Zijlstra | 3fe6974 | 2008-03-14 20:55:51 +0100 | [diff] [blame] | 516 | 	if (cfs_rq->rb_leftmost == &se->run_node) { | 
 | 517 | 		struct rb_node *next_node; | 
| Peter Zijlstra | 3fe6974 | 2008-03-14 20:55:51 +0100 | [diff] [blame] | 518 |  | 
 | 519 | 		next_node = rb_next(&se->run_node); | 
 | 520 | 		cfs_rq->rb_leftmost = next_node; | 
| Peter Zijlstra | 3fe6974 | 2008-03-14 20:55:51 +0100 | [diff] [blame] | 521 | 	} | 
| Ingo Molnar | e9acbff | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 522 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 523 | 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 524 | } | 
 | 525 |  | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 526 | struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 527 | { | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 528 | 	struct rb_node *left = cfs_rq->rb_leftmost; | 
 | 529 |  | 
 | 530 | 	if (!left) | 
 | 531 | 		return NULL; | 
 | 532 |  | 
 | 533 | 	return rb_entry(left, struct sched_entity, run_node); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 534 | } | 
 | 535 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 536 | static struct sched_entity *__pick_next_entity(struct sched_entity *se) | 
 | 537 | { | 
 | 538 | 	struct rb_node *next = rb_next(&se->run_node); | 
 | 539 |  | 
 | 540 | 	if (!next) | 
 | 541 | 		return NULL; | 
 | 542 |  | 
 | 543 | 	return rb_entry(next, struct sched_entity, run_node); | 
 | 544 | } | 
 | 545 |  | 
 | 546 | #ifdef CONFIG_SCHED_DEBUG | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 547 | struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 548 | { | 
| Ingo Molnar | 7eee3e6 | 2008-02-22 10:32:21 +0100 | [diff] [blame] | 549 | 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline); | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 550 |  | 
| Balbir Singh | 70eee74 | 2008-02-22 13:25:53 +0530 | [diff] [blame] | 551 | 	if (!last) | 
 | 552 | 		return NULL; | 
| Ingo Molnar | 7eee3e6 | 2008-02-22 10:32:21 +0100 | [diff] [blame] | 553 |  | 
 | 554 | 	return rb_entry(last, struct sched_entity, run_node); | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 555 | } | 
 | 556 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 557 | /************************************************************** | 
 | 558 |  * Scheduling class statistics methods: | 
 | 559 |  */ | 
 | 560 |  | 
| Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 561 | int sched_proc_update_handler(struct ctl_table *table, int write, | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 562 | 		void __user *buffer, size_t *lenp, | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 563 | 		loff_t *ppos) | 
 | 564 | { | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 565 | 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 
| Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 566 | 	int factor = get_update_sysctl_factor(); | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 567 |  | 
 | 568 | 	if (ret || !write) | 
 | 569 | 		return ret; | 
 | 570 |  | 
 | 571 | 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, | 
 | 572 | 					sysctl_sched_min_granularity); | 
 | 573 |  | 
| Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 574 | #define WRT_SYSCTL(name) \ | 
 | 575 | 	(normalized_sysctl_##name = sysctl_##name / (factor)) | 
 | 576 | 	WRT_SYSCTL(sched_min_granularity); | 
 | 577 | 	WRT_SYSCTL(sched_latency); | 
 | 578 | 	WRT_SYSCTL(sched_wakeup_granularity); | 
| Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 579 | #undef WRT_SYSCTL | 
 | 580 |  | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 581 | 	return 0; | 
 | 582 | } | 
 | 583 | #endif | 
| Ingo Molnar | 647e7ca | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 584 |  | 
 | 585 | /* | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 586 |  * delta /= w | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 587 |  */ | 
 | 588 | static inline unsigned long | 
 | 589 | calc_delta_fair(unsigned long delta, struct sched_entity *se) | 
 | 590 | { | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 591 | 	if (unlikely(se->load.weight != NICE_0_LOAD)) | 
 | 592 | 		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 593 |  | 
 | 594 | 	return delta; | 
 | 595 | } | 
 | 596 |  | 
 | 597 | /* | 
| Ingo Molnar | 647e7ca | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 598 |  * The idea is to set a period in which each task runs once. | 
 | 599 |  * | 
 | 600 |  * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch | 
 | 601 |  * this period because otherwise the slices get too small. | 
 | 602 |  * | 
 | 603 |  * p = (nr <= nl) ? l : l*nr/nl | 
 | 604 |  */ | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 605 | static u64 __sched_period(unsigned long nr_running) | 
 | 606 | { | 
 | 607 | 	u64 period = sysctl_sched_latency; | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 608 | 	unsigned long nr_latency = sched_nr_latency; | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 609 |  | 
 | 610 | 	if (unlikely(nr_running > nr_latency)) { | 
| Peter Zijlstra | 4bf0b77 | 2008-01-25 21:08:21 +0100 | [diff] [blame] | 611 | 		period = sysctl_sched_min_granularity; | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 612 | 		period *= nr_running; | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 613 | 	} | 
 | 614 |  | 
 | 615 | 	return period; | 
 | 616 | } | 
 | 617 |  | 
| Ingo Molnar | 647e7ca | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 618 | /* | 
 | 619 |  * We calculate the wall-time slice from the period by taking a part | 
 | 620 |  * proportional to the weight. | 
 | 621 |  * | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 622 |  * s = p*P[w/rw] | 
| Ingo Molnar | 647e7ca | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 623 |  */ | 
| Peter Zijlstra | 6d0f0eb | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 624 | static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Peter Zijlstra | 2180508 | 2007-08-25 18:41:53 +0200 | [diff] [blame] | 625 | { | 
| Mike Galbraith | 0a58244 | 2009-01-02 12:16:42 +0100 | [diff] [blame] | 626 | 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 627 |  | 
| Mike Galbraith | 0a58244 | 2009-01-02 12:16:42 +0100 | [diff] [blame] | 628 | 	for_each_sched_entity(se) { | 
| Lin Ming | 6272d68 | 2009-01-15 17:17:15 +0100 | [diff] [blame] | 629 | 		struct load_weight *load; | 
| Christian Engelmayer | 3104bf0 | 2009-06-16 10:35:12 +0200 | [diff] [blame] | 630 | 		struct load_weight lw; | 
| Lin Ming | 6272d68 | 2009-01-15 17:17:15 +0100 | [diff] [blame] | 631 |  | 
 | 632 | 		cfs_rq = cfs_rq_of(se); | 
 | 633 | 		load = &cfs_rq->load; | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 634 |  | 
| Mike Galbraith | 0a58244 | 2009-01-02 12:16:42 +0100 | [diff] [blame] | 635 | 		if (unlikely(!se->on_rq)) { | 
| Christian Engelmayer | 3104bf0 | 2009-06-16 10:35:12 +0200 | [diff] [blame] | 636 | 			lw = cfs_rq->load; | 
| Mike Galbraith | 0a58244 | 2009-01-02 12:16:42 +0100 | [diff] [blame] | 637 |  | 
 | 638 | 			update_load_add(&lw, se->load.weight); | 
 | 639 | 			load = &lw; | 
 | 640 | 		} | 
 | 641 | 		slice = calc_delta_mine(slice, se->load.weight, load); | 
 | 642 | 	} | 
 | 643 | 	return slice; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 644 | } | 
 | 645 |  | 
| Ingo Molnar | 647e7ca | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 646 | /* | 
| Peter Zijlstra | ac884de | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 647 |  * We calculate the vruntime slice of a to be inserted task | 
| Ingo Molnar | 647e7ca | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 648 |  * | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 649 |  * vs = s/w | 
| Ingo Molnar | 647e7ca | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 650 |  */ | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 651 | static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | 647e7ca | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 652 | { | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 653 | 	return calc_delta_fair(sched_slice(cfs_rq, se), se); | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 654 | } | 
 | 655 |  | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 656 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 657 | static void update_cfs_shares(struct cfs_rq *cfs_rq); | 
| Paul Turner | 3b3d190 | 2010-11-15 15:47:08 -0800 | [diff] [blame] | 658 |  | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 659 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 660 |  * Update the current task's runtime statistics. Skip current tasks that | 
 | 661 |  * are not in our scheduling class. | 
 | 662 |  */ | 
 | 663 | static inline void | 
| Ingo Molnar | 8ebc91d | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 664 | __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, | 
 | 665 | 	      unsigned long delta_exec) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 666 | { | 
| Ingo Molnar | bbdba7c | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 667 | 	unsigned long delta_exec_weighted; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 668 |  | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 669 | 	schedstat_set(curr->statistics.exec_max, | 
 | 670 | 		      max((u64)delta_exec, curr->statistics.exec_max)); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 671 |  | 
 | 672 | 	curr->sum_exec_runtime += delta_exec; | 
| Ingo Molnar | 7a62eab | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 673 | 	schedstat_add(cfs_rq, exec_clock, delta_exec); | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 674 | 	delta_exec_weighted = calc_delta_fair(delta_exec, curr); | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 675 |  | 
| Ingo Molnar | e9acbff | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 676 | 	curr->vruntime += delta_exec_weighted; | 
| Peter Zijlstra | 1af5f73 | 2008-10-24 11:06:13 +0200 | [diff] [blame] | 677 | 	update_min_vruntime(cfs_rq); | 
| Paul Turner | 3b3d190 | 2010-11-15 15:47:08 -0800 | [diff] [blame] | 678 |  | 
| Peter Zijlstra | 70caf8a | 2010-11-20 00:53:51 +0100 | [diff] [blame] | 679 | #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED | 
| Paul Turner | 3b3d190 | 2010-11-15 15:47:08 -0800 | [diff] [blame] | 680 | 	cfs_rq->load_unacc_exec_time += delta_exec; | 
| Paul Turner | 3b3d190 | 2010-11-15 15:47:08 -0800 | [diff] [blame] | 681 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 682 | } | 
 | 683 |  | 
| Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 684 | static void update_curr(struct cfs_rq *cfs_rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 685 | { | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 686 | 	struct sched_entity *curr = cfs_rq->curr; | 
| Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 687 | 	u64 now = rq_of(cfs_rq)->clock_task; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 688 | 	unsigned long delta_exec; | 
 | 689 |  | 
 | 690 | 	if (unlikely(!curr)) | 
 | 691 | 		return; | 
 | 692 |  | 
 | 693 | 	/* | 
 | 694 | 	 * Get the amount of time the current task was running | 
 | 695 | 	 * since the last time we changed load (this cannot | 
 | 696 | 	 * overflow on 32 bits): | 
 | 697 | 	 */ | 
| Ingo Molnar | 8ebc91d | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 698 | 	delta_exec = (unsigned long)(now - curr->exec_start); | 
| Peter Zijlstra | 34f28ec | 2008-12-16 08:45:31 +0100 | [diff] [blame] | 699 | 	if (!delta_exec) | 
 | 700 | 		return; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 701 |  | 
| Ingo Molnar | 8ebc91d | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 702 | 	__update_curr(cfs_rq, curr, delta_exec); | 
 | 703 | 	curr->exec_start = now; | 
| Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 704 |  | 
 | 705 | 	if (entity_is_task(curr)) { | 
 | 706 | 		struct task_struct *curtask = task_of(curr); | 
 | 707 |  | 
| Ingo Molnar | f977bb4 | 2009-09-13 18:15:54 +0200 | [diff] [blame] | 708 | 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); | 
| Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 709 | 		cpuacct_charge(curtask, delta_exec); | 
| Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 710 | 		account_group_exec_runtime(curtask, delta_exec); | 
| Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 711 | 	} | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 712 |  | 
 | 713 | 	account_cfs_rq_runtime(cfs_rq, delta_exec); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 714 | } | 
 | 715 |  | 
 | 716 | static inline void | 
| Ingo Molnar | 5870db5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 717 | update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 718 | { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 719 | 	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 720 | } | 
 | 721 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 722 | /* | 
 | 723 |  * Task is being enqueued - update stats: | 
 | 724 |  */ | 
| Ingo Molnar | d2417e5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 725 | static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 726 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 727 | 	/* | 
 | 728 | 	 * Are we enqueueing a waiting task? (for current tasks | 
 | 729 | 	 * a dequeue/enqueue event is a NOP) | 
 | 730 | 	 */ | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 731 | 	if (se != cfs_rq->curr) | 
| Ingo Molnar | 5870db5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 732 | 		update_stats_wait_start(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 733 | } | 
 | 734 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 735 | static void | 
| Ingo Molnar | 9ef0a96 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 736 | update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 737 | { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 738 | 	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max, | 
 | 739 | 			rq_of(cfs_rq)->clock - se->statistics.wait_start)); | 
 | 740 | 	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1); | 
 | 741 | 	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum + | 
 | 742 | 			rq_of(cfs_rq)->clock - se->statistics.wait_start); | 
| Peter Zijlstra | 768d0c2 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 743 | #ifdef CONFIG_SCHEDSTATS | 
 | 744 | 	if (entity_is_task(se)) { | 
 | 745 | 		trace_sched_stat_wait(task_of(se), | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 746 | 			rq_of(cfs_rq)->clock - se->statistics.wait_start); | 
| Peter Zijlstra | 768d0c2 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 747 | 	} | 
 | 748 | #endif | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 749 | 	schedstat_set(se->statistics.wait_start, 0); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 750 | } | 
 | 751 |  | 
 | 752 | static inline void | 
| Ingo Molnar | 19b6a2e | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 753 | update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 754 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 755 | 	/* | 
 | 756 | 	 * Mark the end of the wait period if dequeueing a | 
 | 757 | 	 * waiting task: | 
 | 758 | 	 */ | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 759 | 	if (se != cfs_rq->curr) | 
| Ingo Molnar | 9ef0a96 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 760 | 		update_stats_wait_end(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 761 | } | 
 | 762 |  | 
 | 763 | /* | 
 | 764 |  * We are picking a new current task - update its stats: | 
 | 765 |  */ | 
 | 766 | static inline void | 
| Ingo Molnar | 79303e9 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 767 | update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 768 | { | 
 | 769 | 	/* | 
 | 770 | 	 * We are starting a new run period: | 
 | 771 | 	 */ | 
| Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 772 | 	se->exec_start = rq_of(cfs_rq)->clock_task; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 773 | } | 
 | 774 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 775 | /************************************************** | 
 | 776 |  * Scheduling class queueing methods: | 
 | 777 |  */ | 
 | 778 |  | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 779 | static void | 
 | 780 | account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
 | 781 | { | 
 | 782 | 	update_load_add(&cfs_rq->load, se->load.weight); | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 783 | 	if (!parent_entity(se)) | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 784 | 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight); | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 785 | #ifdef CONFIG_SMP | 
 | 786 | 	if (entity_is_task(se)) | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 787 | 		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks); | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 788 | #endif | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 789 | 	cfs_rq->nr_running++; | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 790 | } | 
 | 791 |  | 
 | 792 | static void | 
 | 793 | account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
 | 794 | { | 
 | 795 | 	update_load_sub(&cfs_rq->load, se->load.weight); | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 796 | 	if (!parent_entity(se)) | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 797 | 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 798 | 	if (entity_is_task(se)) | 
| Bharata B Rao | b87f172 | 2008-09-25 09:53:54 +0530 | [diff] [blame] | 799 | 		list_del_init(&se->group_node); | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 800 | 	cfs_rq->nr_running--; | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 801 | } | 
 | 802 |  | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 803 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 804 | /* we need this in update_cfs_load and load-balance functions below */ | 
 | 805 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 806 | # ifdef CONFIG_SMP | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 807 | static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq, | 
 | 808 | 					    int global_update) | 
 | 809 | { | 
 | 810 | 	struct task_group *tg = cfs_rq->tg; | 
 | 811 | 	long load_avg; | 
 | 812 |  | 
 | 813 | 	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1); | 
 | 814 | 	load_avg -= cfs_rq->load_contribution; | 
 | 815 |  | 
 | 816 | 	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) { | 
 | 817 | 		atomic_add(load_avg, &tg->load_weight); | 
 | 818 | 		cfs_rq->load_contribution += load_avg; | 
 | 819 | 	} | 
 | 820 | } | 
 | 821 |  | 
 | 822 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 823 | { | 
| Paul Turner | a7a4f8a | 2010-11-15 15:47:06 -0800 | [diff] [blame] | 824 | 	u64 period = sysctl_sched_shares_window; | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 825 | 	u64 now, delta; | 
| Paul Turner | e33078b | 2010-11-15 15:47:04 -0800 | [diff] [blame] | 826 | 	unsigned long load = cfs_rq->load.weight; | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 827 |  | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 828 | 	if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq)) | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 829 | 		return; | 
 | 830 |  | 
| Paul Turner | 05ca62c | 2011-01-21 20:45:02 -0800 | [diff] [blame] | 831 | 	now = rq_of(cfs_rq)->clock_task; | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 832 | 	delta = now - cfs_rq->load_stamp; | 
 | 833 |  | 
| Paul Turner | e33078b | 2010-11-15 15:47:04 -0800 | [diff] [blame] | 834 | 	/* truncate load history at 4 idle periods */ | 
 | 835 | 	if (cfs_rq->load_stamp > cfs_rq->load_last && | 
 | 836 | 	    now - cfs_rq->load_last > 4 * period) { | 
 | 837 | 		cfs_rq->load_period = 0; | 
 | 838 | 		cfs_rq->load_avg = 0; | 
| Paul Turner | f07333b | 2011-01-21 20:45:03 -0800 | [diff] [blame] | 839 | 		delta = period - 1; | 
| Paul Turner | e33078b | 2010-11-15 15:47:04 -0800 | [diff] [blame] | 840 | 	} | 
 | 841 |  | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 842 | 	cfs_rq->load_stamp = now; | 
| Paul Turner | 3b3d190 | 2010-11-15 15:47:08 -0800 | [diff] [blame] | 843 | 	cfs_rq->load_unacc_exec_time = 0; | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 844 | 	cfs_rq->load_period += delta; | 
| Paul Turner | e33078b | 2010-11-15 15:47:04 -0800 | [diff] [blame] | 845 | 	if (load) { | 
 | 846 | 		cfs_rq->load_last = now; | 
 | 847 | 		cfs_rq->load_avg += delta * load; | 
 | 848 | 	} | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 849 |  | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 850 | 	/* consider updating load contribution on each fold or truncate */ | 
 | 851 | 	if (global_update || cfs_rq->load_period > period | 
 | 852 | 	    || !cfs_rq->load_period) | 
 | 853 | 		update_cfs_rq_load_contribution(cfs_rq, global_update); | 
 | 854 |  | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 855 | 	while (cfs_rq->load_period > period) { | 
 | 856 | 		/* | 
 | 857 | 		 * Inline assembly required to prevent the compiler | 
 | 858 | 		 * optimising this loop into a divmod call. | 
 | 859 | 		 * See __iter_div_u64_rem() for another example of this. | 
 | 860 | 		 */ | 
 | 861 | 		asm("" : "+rm" (cfs_rq->load_period)); | 
 | 862 | 		cfs_rq->load_period /= 2; | 
 | 863 | 		cfs_rq->load_avg /= 2; | 
 | 864 | 	} | 
| Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 865 |  | 
| Paul Turner | e33078b | 2010-11-15 15:47:04 -0800 | [diff] [blame] | 866 | 	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg) | 
 | 867 | 		list_del_leaf_cfs_rq(cfs_rq); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 868 | } | 
 | 869 |  | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 870 | static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) | 
 | 871 | { | 
 | 872 | 	long tg_weight; | 
 | 873 |  | 
 | 874 | 	/* | 
 | 875 | 	 * Use this CPU's actual weight instead of the last load_contribution | 
 | 876 | 	 * to gain a more accurate current total weight. See | 
 | 877 | 	 * update_cfs_rq_load_contribution(). | 
 | 878 | 	 */ | 
 | 879 | 	tg_weight = atomic_read(&tg->load_weight); | 
 | 880 | 	tg_weight -= cfs_rq->load_contribution; | 
 | 881 | 	tg_weight += cfs_rq->load.weight; | 
 | 882 |  | 
 | 883 | 	return tg_weight; | 
 | 884 | } | 
 | 885 |  | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 886 | static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 887 | { | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 888 | 	long tg_weight, load, shares; | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 889 |  | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 890 | 	tg_weight = calc_tg_weight(tg, cfs_rq); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 891 | 	load = cfs_rq->load.weight; | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 892 |  | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 893 | 	shares = (tg->shares * load); | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 894 | 	if (tg_weight) | 
 | 895 | 		shares /= tg_weight; | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 896 |  | 
 | 897 | 	if (shares < MIN_SHARES) | 
 | 898 | 		shares = MIN_SHARES; | 
 | 899 | 	if (shares > tg->shares) | 
 | 900 | 		shares = tg->shares; | 
 | 901 |  | 
 | 902 | 	return shares; | 
 | 903 | } | 
 | 904 |  | 
 | 905 | static void update_entity_shares_tick(struct cfs_rq *cfs_rq) | 
 | 906 | { | 
 | 907 | 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) { | 
 | 908 | 		update_cfs_load(cfs_rq, 0); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 909 | 		update_cfs_shares(cfs_rq); | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 910 | 	} | 
 | 911 | } | 
 | 912 | # else /* CONFIG_SMP */ | 
 | 913 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | 
 | 914 | { | 
 | 915 | } | 
 | 916 |  | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 917 | static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 918 | { | 
 | 919 | 	return tg->shares; | 
 | 920 | } | 
 | 921 |  | 
 | 922 | static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq) | 
 | 923 | { | 
 | 924 | } | 
 | 925 | # endif /* CONFIG_SMP */ | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 926 | static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, | 
 | 927 | 			    unsigned long weight) | 
 | 928 | { | 
| Paul Turner | 19e5eeb | 2010-12-15 19:10:18 -0800 | [diff] [blame] | 929 | 	if (se->on_rq) { | 
 | 930 | 		/* commit outstanding execution time */ | 
 | 931 | 		if (cfs_rq->curr == se) | 
 | 932 | 			update_curr(cfs_rq); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 933 | 		account_entity_dequeue(cfs_rq, se); | 
| Paul Turner | 19e5eeb | 2010-12-15 19:10:18 -0800 | [diff] [blame] | 934 | 	} | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 935 |  | 
 | 936 | 	update_load_set(&se->load, weight); | 
 | 937 |  | 
 | 938 | 	if (se->on_rq) | 
 | 939 | 		account_entity_enqueue(cfs_rq, se); | 
 | 940 | } | 
 | 941 |  | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 942 | static void update_cfs_shares(struct cfs_rq *cfs_rq) | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 943 | { | 
 | 944 | 	struct task_group *tg; | 
 | 945 | 	struct sched_entity *se; | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 946 | 	long shares; | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 947 |  | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 948 | 	tg = cfs_rq->tg; | 
 | 949 | 	se = tg->se[cpu_of(rq_of(cfs_rq))]; | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 950 | 	if (!se || throttled_hierarchy(cfs_rq)) | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 951 | 		return; | 
| Yong Zhang | 3ff6dca | 2011-01-24 15:33:52 +0800 | [diff] [blame] | 952 | #ifndef CONFIG_SMP | 
 | 953 | 	if (likely(se->load.weight == tg->shares)) | 
 | 954 | 		return; | 
 | 955 | #endif | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 956 | 	shares = calc_cfs_shares(cfs_rq, tg); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 957 |  | 
 | 958 | 	reweight_entity(cfs_rq_of(se), se, shares); | 
 | 959 | } | 
 | 960 | #else /* CONFIG_FAIR_GROUP_SCHED */ | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 961 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 962 | { | 
 | 963 | } | 
 | 964 |  | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 965 | static inline void update_cfs_shares(struct cfs_rq *cfs_rq) | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 966 | { | 
 | 967 | } | 
| Paul Turner | 43365bd | 2010-12-15 19:10:17 -0800 | [diff] [blame] | 968 |  | 
 | 969 | static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq) | 
 | 970 | { | 
 | 971 | } | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 972 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 
 | 973 |  | 
| Ingo Molnar | 2396af6 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 974 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 975 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 976 | #ifdef CONFIG_SCHEDSTATS | 
| Peter Zijlstra | e414314 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 977 | 	struct task_struct *tsk = NULL; | 
 | 978 |  | 
 | 979 | 	if (entity_is_task(se)) | 
 | 980 | 		tsk = task_of(se); | 
 | 981 |  | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 982 | 	if (se->statistics.sleep_start) { | 
 | 983 | 		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 984 |  | 
 | 985 | 		if ((s64)delta < 0) | 
 | 986 | 			delta = 0; | 
 | 987 |  | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 988 | 		if (unlikely(delta > se->statistics.sleep_max)) | 
 | 989 | 			se->statistics.sleep_max = delta; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 990 |  | 
| Peter Zijlstra | 8c79a04 | 2012-01-30 14:51:37 +0100 | [diff] [blame] | 991 | 		se->statistics.sleep_start = 0; | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 992 | 		se->statistics.sum_sleep_runtime += delta; | 
| Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 993 |  | 
| Peter Zijlstra | 768d0c2 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 994 | 		if (tsk) { | 
| Peter Zijlstra | e414314 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 995 | 			account_scheduler_latency(tsk, delta >> 10, 1); | 
| Peter Zijlstra | 768d0c2 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 996 | 			trace_sched_stat_sleep(tsk, delta); | 
 | 997 | 		} | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 998 | 	} | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 999 | 	if (se->statistics.block_start) { | 
 | 1000 | 		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1001 |  | 
 | 1002 | 		if ((s64)delta < 0) | 
 | 1003 | 			delta = 0; | 
 | 1004 |  | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1005 | 		if (unlikely(delta > se->statistics.block_max)) | 
 | 1006 | 			se->statistics.block_max = delta; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1007 |  | 
| Peter Zijlstra | 8c79a04 | 2012-01-30 14:51:37 +0100 | [diff] [blame] | 1008 | 		se->statistics.block_start = 0; | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1009 | 		se->statistics.sum_sleep_runtime += delta; | 
| Ingo Molnar | 30084fb | 2007-10-02 14:13:08 +0200 | [diff] [blame] | 1010 |  | 
| Peter Zijlstra | e414314 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1011 | 		if (tsk) { | 
| Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 1012 | 			if (tsk->in_iowait) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1013 | 				se->statistics.iowait_sum += delta; | 
 | 1014 | 				se->statistics.iowait_count++; | 
| Peter Zijlstra | 768d0c2 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1015 | 				trace_sched_stat_iowait(tsk, delta); | 
| Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 1016 | 			} | 
 | 1017 |  | 
| Andrew Vagin | b781a60 | 2011-11-28 12:03:35 +0300 | [diff] [blame] | 1018 | 			trace_sched_stat_blocked(tsk, delta); | 
 | 1019 |  | 
| Peter Zijlstra | e414314 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1020 | 			/* | 
 | 1021 | 			 * Blocking time is in units of nanosecs, so shift by | 
 | 1022 | 			 * 20 to get a milliseconds-range estimation of the | 
 | 1023 | 			 * amount of time that the task spent sleeping: | 
 | 1024 | 			 */ | 
 | 1025 | 			if (unlikely(prof_on == SLEEP_PROFILING)) { | 
 | 1026 | 				profile_hits(SLEEP_PROFILING, | 
 | 1027 | 						(void *)get_wchan(tsk), | 
 | 1028 | 						delta >> 20); | 
 | 1029 | 			} | 
 | 1030 | 			account_scheduler_latency(tsk, delta >> 10, 0); | 
| Ingo Molnar | 30084fb | 2007-10-02 14:13:08 +0200 | [diff] [blame] | 1031 | 		} | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1032 | 	} | 
 | 1033 | #endif | 
 | 1034 | } | 
 | 1035 |  | 
| Peter Zijlstra | ddc9729 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1036 | static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
 | 1037 | { | 
 | 1038 | #ifdef CONFIG_SCHED_DEBUG | 
 | 1039 | 	s64 d = se->vruntime - cfs_rq->min_vruntime; | 
 | 1040 |  | 
 | 1041 | 	if (d < 0) | 
 | 1042 | 		d = -d; | 
 | 1043 |  | 
 | 1044 | 	if (d > 3*sysctl_sched_latency) | 
 | 1045 | 		schedstat_inc(cfs_rq, nr_spread_over); | 
 | 1046 | #endif | 
 | 1047 | } | 
 | 1048 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1049 | static void | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1050 | place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) | 
 | 1051 | { | 
| Peter Zijlstra | 1af5f73 | 2008-10-24 11:06:13 +0200 | [diff] [blame] | 1052 | 	u64 vruntime = cfs_rq->min_vruntime; | 
| Peter Zijlstra | 94dfb5e | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1053 |  | 
| Peter Zijlstra | 2cb8600 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 1054 | 	/* | 
 | 1055 | 	 * The 'current' period is already promised to the current tasks, | 
 | 1056 | 	 * however the extra weight of the new task will slow them down a | 
 | 1057 | 	 * little, place the new task so that it fits in the slot that | 
 | 1058 | 	 * stays open at the end. | 
 | 1059 | 	 */ | 
| Peter Zijlstra | 94dfb5e | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1060 | 	if (initial && sched_feat(START_DEBIT)) | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 1061 | 		vruntime += sched_vslice(cfs_rq, se); | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1062 |  | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1063 | 	/* sleeps up to a single latency don't count. */ | 
| Mike Galbraith | 5ca9880 | 2010-03-11 17:17:17 +0100 | [diff] [blame] | 1064 | 	if (!initial) { | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1065 | 		unsigned long thresh = sysctl_sched_latency; | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 1066 |  | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1067 | 		/* | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1068 | 		 * Halve their sleep time's effect, to allow | 
 | 1069 | 		 * for a gentler effect of sleepers: | 
 | 1070 | 		 */ | 
 | 1071 | 		if (sched_feat(GENTLE_FAIR_SLEEPERS)) | 
 | 1072 | 			thresh >>= 1; | 
| Ingo Molnar | 51e0304 | 2009-09-16 08:54:45 +0200 | [diff] [blame] | 1073 |  | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1074 | 		vruntime -= thresh; | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1075 | 	} | 
 | 1076 |  | 
| Mike Galbraith | b5d9d73 | 2009-09-08 11:12:28 +0200 | [diff] [blame] | 1077 | 	/* ensure we never gain time by being placed backwards. */ | 
 | 1078 | 	vruntime = max_vruntime(se->vruntime, vruntime); | 
 | 1079 |  | 
| Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1080 | 	se->vruntime = vruntime; | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1081 | } | 
 | 1082 |  | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1083 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq); | 
 | 1084 |  | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1085 | static void | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1086 | enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1087 | { | 
 | 1088 | 	/* | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1089 | 	 * Update the normalized vruntime before updating min_vruntime | 
 | 1090 | 	 * through callig update_curr(). | 
 | 1091 | 	 */ | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1092 | 	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING)) | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1093 | 		se->vruntime += cfs_rq->min_vruntime; | 
 | 1094 |  | 
 | 1095 | 	/* | 
| Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1096 | 	 * Update run-time statistics of the 'current'. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1097 | 	 */ | 
| Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1098 | 	update_curr(cfs_rq); | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 1099 | 	update_cfs_load(cfs_rq, 0); | 
| Peter Zijlstra | a992241 | 2008-05-05 23:56:17 +0200 | [diff] [blame] | 1100 | 	account_entity_enqueue(cfs_rq, se); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 1101 | 	update_cfs_shares(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1102 |  | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1103 | 	if (flags & ENQUEUE_WAKEUP) { | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1104 | 		place_entity(cfs_rq, se, 0); | 
| Ingo Molnar | 2396af6 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1105 | 		enqueue_sleeper(cfs_rq, se); | 
| Ingo Molnar | e9acbff | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1106 | 	} | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1107 |  | 
| Ingo Molnar | d2417e5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1108 | 	update_stats_enqueue(cfs_rq, se); | 
| Peter Zijlstra | ddc9729 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1109 | 	check_spread(cfs_rq, se); | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1110 | 	if (se != cfs_rq->curr) | 
 | 1111 | 		__enqueue_entity(cfs_rq, se); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1112 | 	se->on_rq = 1; | 
| Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 1113 |  | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1114 | 	if (cfs_rq->nr_running == 1) { | 
| Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 1115 | 		list_add_leaf_cfs_rq(cfs_rq); | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1116 | 		check_enqueue_throttle(cfs_rq); | 
 | 1117 | 	} | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1118 | } | 
 | 1119 |  | 
| Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1120 | static void __clear_buddies_last(struct sched_entity *se) | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1121 | { | 
| Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1122 | 	for_each_sched_entity(se) { | 
 | 1123 | 		struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
 | 1124 | 		if (cfs_rq->last == se) | 
 | 1125 | 			cfs_rq->last = NULL; | 
 | 1126 | 		else | 
 | 1127 | 			break; | 
 | 1128 | 	} | 
 | 1129 | } | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1130 |  | 
| Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1131 | static void __clear_buddies_next(struct sched_entity *se) | 
 | 1132 | { | 
 | 1133 | 	for_each_sched_entity(se) { | 
 | 1134 | 		struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
 | 1135 | 		if (cfs_rq->next == se) | 
 | 1136 | 			cfs_rq->next = NULL; | 
 | 1137 | 		else | 
 | 1138 | 			break; | 
 | 1139 | 	} | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1140 | } | 
 | 1141 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1142 | static void __clear_buddies_skip(struct sched_entity *se) | 
 | 1143 | { | 
 | 1144 | 	for_each_sched_entity(se) { | 
 | 1145 | 		struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
 | 1146 | 		if (cfs_rq->skip == se) | 
 | 1147 | 			cfs_rq->skip = NULL; | 
 | 1148 | 		else | 
 | 1149 | 			break; | 
 | 1150 | 	} | 
 | 1151 | } | 
 | 1152 |  | 
| Peter Zijlstra | a571bbe | 2009-01-28 14:51:40 +0100 | [diff] [blame] | 1153 | static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
 | 1154 | { | 
| Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1155 | 	if (cfs_rq->last == se) | 
 | 1156 | 		__clear_buddies_last(se); | 
 | 1157 |  | 
 | 1158 | 	if (cfs_rq->next == se) | 
 | 1159 | 		__clear_buddies_next(se); | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1160 |  | 
 | 1161 | 	if (cfs_rq->skip == se) | 
 | 1162 | 		__clear_buddies_skip(se); | 
| Peter Zijlstra | a571bbe | 2009-01-28 14:51:40 +0100 | [diff] [blame] | 1163 | } | 
 | 1164 |  | 
| Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 1165 | static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); | 
 | 1166 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1167 | static void | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1168 | dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1169 | { | 
| Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1170 | 	/* | 
 | 1171 | 	 * Update run-time statistics of the 'current'. | 
 | 1172 | 	 */ | 
 | 1173 | 	update_curr(cfs_rq); | 
 | 1174 |  | 
| Ingo Molnar | 19b6a2e | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1175 | 	update_stats_dequeue(cfs_rq, se); | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1176 | 	if (flags & DEQUEUE_SLEEP) { | 
| Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1177 | #ifdef CONFIG_SCHEDSTATS | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1178 | 		if (entity_is_task(se)) { | 
 | 1179 | 			struct task_struct *tsk = task_of(se); | 
 | 1180 |  | 
 | 1181 | 			if (tsk->state & TASK_INTERRUPTIBLE) | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1182 | 				se->statistics.sleep_start = rq_of(cfs_rq)->clock; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1183 | 			if (tsk->state & TASK_UNINTERRUPTIBLE) | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1184 | 				se->statistics.block_start = rq_of(cfs_rq)->clock; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1185 | 		} | 
| Dmitry Adamushko | db36cc7 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 1186 | #endif | 
| Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1187 | 	} | 
 | 1188 |  | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1189 | 	clear_buddies(cfs_rq, se); | 
| Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 1190 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1191 | 	if (se != cfs_rq->curr) | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1192 | 		__dequeue_entity(cfs_rq, se); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1193 | 	se->on_rq = 0; | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 1194 | 	update_cfs_load(cfs_rq, 0); | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1195 | 	account_entity_dequeue(cfs_rq, se); | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1196 |  | 
 | 1197 | 	/* | 
 | 1198 | 	 * Normalize the entity after updating the min_vruntime because the | 
 | 1199 | 	 * update can refer to the ->curr item and we need to reflect this | 
 | 1200 | 	 * movement in our normalized position. | 
 | 1201 | 	 */ | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1202 | 	if (!(flags & DEQUEUE_SLEEP)) | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1203 | 		se->vruntime -= cfs_rq->min_vruntime; | 
| Peter Zijlstra | 1e87623 | 2011-05-17 16:21:10 -0700 | [diff] [blame] | 1204 |  | 
| Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 1205 | 	/* return excess runtime on last dequeue */ | 
 | 1206 | 	return_cfs_rq_runtime(cfs_rq); | 
 | 1207 |  | 
| Peter Zijlstra | 1e87623 | 2011-05-17 16:21:10 -0700 | [diff] [blame] | 1208 | 	update_min_vruntime(cfs_rq); | 
 | 1209 | 	update_cfs_shares(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1210 | } | 
 | 1211 |  | 
 | 1212 | /* | 
 | 1213 |  * Preempt the current task with a newly woken task if needed: | 
 | 1214 |  */ | 
| Peter Zijlstra | 7c92e54 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1215 | static void | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1216 | check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1217 | { | 
| Peter Zijlstra | 1169783 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1218 | 	unsigned long ideal_runtime, delta_exec; | 
| Wang Xingchao | f4cfb33 | 2011-09-16 13:35:52 -0400 | [diff] [blame] | 1219 | 	struct sched_entity *se; | 
 | 1220 | 	s64 delta; | 
| Peter Zijlstra | 1169783 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1221 |  | 
| Peter Zijlstra | 6d0f0eb | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1222 | 	ideal_runtime = sched_slice(cfs_rq, curr); | 
| Peter Zijlstra | 1169783 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1223 | 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | 
| Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1224 | 	if (delta_exec > ideal_runtime) { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1225 | 		resched_task(rq_of(cfs_rq)->curr); | 
| Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1226 | 		/* | 
 | 1227 | 		 * The current task ran long enough, ensure it doesn't get | 
 | 1228 | 		 * re-elected due to buddy favours. | 
 | 1229 | 		 */ | 
 | 1230 | 		clear_buddies(cfs_rq, curr); | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1231 | 		return; | 
 | 1232 | 	} | 
 | 1233 |  | 
 | 1234 | 	/* | 
 | 1235 | 	 * Ensure that a task that missed wakeup preemption by a | 
 | 1236 | 	 * narrow margin doesn't have to wait for a full slice. | 
 | 1237 | 	 * This also mitigates buddy induced latencies under load. | 
 | 1238 | 	 */ | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1239 | 	if (delta_exec < sysctl_sched_min_granularity) | 
 | 1240 | 		return; | 
 | 1241 |  | 
| Wang Xingchao | f4cfb33 | 2011-09-16 13:35:52 -0400 | [diff] [blame] | 1242 | 	se = __pick_first_entity(cfs_rq); | 
 | 1243 | 	delta = curr->vruntime - se->vruntime; | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1244 |  | 
| Wang Xingchao | f4cfb33 | 2011-09-16 13:35:52 -0400 | [diff] [blame] | 1245 | 	if (delta < 0) | 
 | 1246 | 		return; | 
| Mike Galbraith | d7d82944 | 2011-01-05 05:41:17 +0100 | [diff] [blame] | 1247 |  | 
| Wang Xingchao | f4cfb33 | 2011-09-16 13:35:52 -0400 | [diff] [blame] | 1248 | 	if (delta > ideal_runtime) | 
 | 1249 | 		resched_task(rq_of(cfs_rq)->curr); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1250 | } | 
 | 1251 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1252 | static void | 
| Ingo Molnar | 8494f41 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1253 | set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1254 | { | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1255 | 	/* 'current' is not kept within the tree. */ | 
 | 1256 | 	if (se->on_rq) { | 
 | 1257 | 		/* | 
 | 1258 | 		 * Any task has to be enqueued before it get to execute on | 
 | 1259 | 		 * a CPU. So account for the time it spent waiting on the | 
 | 1260 | 		 * runqueue. | 
 | 1261 | 		 */ | 
 | 1262 | 		update_stats_wait_end(cfs_rq, se); | 
 | 1263 | 		__dequeue_entity(cfs_rq, se); | 
 | 1264 | 	} | 
 | 1265 |  | 
| Ingo Molnar | 79303e9 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1266 | 	update_stats_curr_start(cfs_rq, se); | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1267 | 	cfs_rq->curr = se; | 
| Ingo Molnar | eba1ed4 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 1268 | #ifdef CONFIG_SCHEDSTATS | 
 | 1269 | 	/* | 
 | 1270 | 	 * Track our maximum slice length, if the CPU's load is at | 
 | 1271 | 	 * least twice that of our own weight (i.e. dont track it | 
 | 1272 | 	 * when there are only lesser-weight tasks around): | 
 | 1273 | 	 */ | 
| Dmitry Adamushko | 495eca4 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 1274 | 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1275 | 		se->statistics.slice_max = max(se->statistics.slice_max, | 
| Ingo Molnar | eba1ed4 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 1276 | 			se->sum_exec_runtime - se->prev_sum_exec_runtime); | 
 | 1277 | 	} | 
 | 1278 | #endif | 
| Peter Zijlstra | 4a55b45 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1279 | 	se->prev_sum_exec_runtime = se->sum_exec_runtime; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1280 | } | 
 | 1281 |  | 
| Peter Zijlstra | 3f3a490 | 2008-10-24 11:06:16 +0200 | [diff] [blame] | 1282 | static int | 
 | 1283 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); | 
 | 1284 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1285 | /* | 
 | 1286 |  * Pick the next process, keeping these things in mind, in this order: | 
 | 1287 |  * 1) keep things fair between processes/task groups | 
 | 1288 |  * 2) pick the "next" process, since someone really wants that to run | 
 | 1289 |  * 3) pick the "last" process, for cache locality | 
 | 1290 |  * 4) do not run the "skip" process, if something else is available | 
 | 1291 |  */ | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1292 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1293 | { | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1294 | 	struct sched_entity *se = __pick_first_entity(cfs_rq); | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1295 | 	struct sched_entity *left = se; | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1296 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1297 | 	/* | 
 | 1298 | 	 * Avoid running the skip buddy, if running something else can | 
 | 1299 | 	 * be done without getting too unfair. | 
 | 1300 | 	 */ | 
 | 1301 | 	if (cfs_rq->skip == se) { | 
 | 1302 | 		struct sched_entity *second = __pick_next_entity(se); | 
 | 1303 | 		if (second && wakeup_preempt_entity(second, left) < 1) | 
 | 1304 | 			se = second; | 
 | 1305 | 	} | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1306 |  | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1307 | 	/* | 
 | 1308 | 	 * Prefer last buddy, try to return the CPU to a preempted task. | 
 | 1309 | 	 */ | 
 | 1310 | 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) | 
 | 1311 | 		se = cfs_rq->last; | 
 | 1312 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1313 | 	/* | 
 | 1314 | 	 * Someone really wants this to run. If it's not unfair, run it. | 
 | 1315 | 	 */ | 
 | 1316 | 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) | 
 | 1317 | 		se = cfs_rq->next; | 
 | 1318 |  | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1319 | 	clear_buddies(cfs_rq, se); | 
| Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 1320 |  | 
 | 1321 | 	return se; | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1322 | } | 
 | 1323 |  | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1324 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq); | 
 | 1325 |  | 
| Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1326 | static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1327 | { | 
 | 1328 | 	/* | 
 | 1329 | 	 * If still on the runqueue then deactivate_task() | 
 | 1330 | 	 * was not called and update_curr() has to be done: | 
 | 1331 | 	 */ | 
 | 1332 | 	if (prev->on_rq) | 
| Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1333 | 		update_curr(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1334 |  | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1335 | 	/* throttle cfs_rqs exceeding runtime */ | 
 | 1336 | 	check_cfs_rq_runtime(cfs_rq); | 
 | 1337 |  | 
| Peter Zijlstra | ddc9729 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1338 | 	check_spread(cfs_rq, prev); | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1339 | 	if (prev->on_rq) { | 
| Ingo Molnar | 5870db5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1340 | 		update_stats_wait_start(cfs_rq, prev); | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1341 | 		/* Put 'current' back into the tree. */ | 
 | 1342 | 		__enqueue_entity(cfs_rq, prev); | 
 | 1343 | 	} | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1344 | 	cfs_rq->curr = NULL; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1345 | } | 
 | 1346 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1347 | static void | 
 | 1348 | entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1349 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1350 | 	/* | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1351 | 	 * Update run-time statistics of the 'current'. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1352 | 	 */ | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1353 | 	update_curr(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1354 |  | 
| Paul Turner | 43365bd | 2010-12-15 19:10:17 -0800 | [diff] [blame] | 1355 | 	/* | 
 | 1356 | 	 * Update share accounting for long-running entities. | 
 | 1357 | 	 */ | 
 | 1358 | 	update_entity_shares_tick(cfs_rq); | 
 | 1359 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1360 | #ifdef CONFIG_SCHED_HRTICK | 
 | 1361 | 	/* | 
 | 1362 | 	 * queued ticks are scheduled to match the slice, so don't bother | 
 | 1363 | 	 * validating it and just reschedule. | 
 | 1364 | 	 */ | 
| Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 1365 | 	if (queued) { | 
 | 1366 | 		resched_task(rq_of(cfs_rq)->curr); | 
 | 1367 | 		return; | 
 | 1368 | 	} | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1369 | 	/* | 
 | 1370 | 	 * don't let the period tick interfere with the hrtick preemption | 
 | 1371 | 	 */ | 
 | 1372 | 	if (!sched_feat(DOUBLE_TICK) && | 
 | 1373 | 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) | 
 | 1374 | 		return; | 
 | 1375 | #endif | 
 | 1376 |  | 
| Yong Zhang | 2c2efae | 2011-07-29 16:20:33 +0800 | [diff] [blame] | 1377 | 	if (cfs_rq->nr_running > 1) | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1378 | 		check_preempt_tick(cfs_rq, curr); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1379 | } | 
 | 1380 |  | 
| Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 1381 |  | 
 | 1382 | /************************************************** | 
 | 1383 |  * CFS bandwidth control machinery | 
 | 1384 |  */ | 
 | 1385 |  | 
 | 1386 | #ifdef CONFIG_CFS_BANDWIDTH | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1387 |  | 
 | 1388 | #ifdef HAVE_JUMP_LABEL | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1389 | static struct static_key __cfs_bandwidth_used; | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1390 |  | 
 | 1391 | static inline bool cfs_bandwidth_used(void) | 
 | 1392 | { | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1393 | 	return static_key_false(&__cfs_bandwidth_used); | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1394 | } | 
 | 1395 |  | 
 | 1396 | void account_cfs_bandwidth_used(int enabled, int was_enabled) | 
 | 1397 | { | 
 | 1398 | 	/* only need to count groups transitioning between enabled/!enabled */ | 
 | 1399 | 	if (enabled && !was_enabled) | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1400 | 		static_key_slow_inc(&__cfs_bandwidth_used); | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1401 | 	else if (!enabled && was_enabled) | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1402 | 		static_key_slow_dec(&__cfs_bandwidth_used); | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1403 | } | 
 | 1404 | #else /* HAVE_JUMP_LABEL */ | 
 | 1405 | static bool cfs_bandwidth_used(void) | 
 | 1406 | { | 
 | 1407 | 	return true; | 
 | 1408 | } | 
 | 1409 |  | 
 | 1410 | void account_cfs_bandwidth_used(int enabled, int was_enabled) {} | 
 | 1411 | #endif /* HAVE_JUMP_LABEL */ | 
 | 1412 |  | 
| Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 1413 | /* | 
 | 1414 |  * default period for cfs group bandwidth. | 
 | 1415 |  * default: 0.1s, units: nanoseconds | 
 | 1416 |  */ | 
 | 1417 | static inline u64 default_cfs_period(void) | 
 | 1418 | { | 
 | 1419 | 	return 100000000ULL; | 
 | 1420 | } | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1421 |  | 
 | 1422 | static inline u64 sched_cfs_bandwidth_slice(void) | 
 | 1423 | { | 
 | 1424 | 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; | 
 | 1425 | } | 
 | 1426 |  | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1427 | /* | 
 | 1428 |  * Replenish runtime according to assigned quota and update expiration time. | 
 | 1429 |  * We use sched_clock_cpu directly instead of rq->clock to avoid adding | 
 | 1430 |  * additional synchronization around rq->lock. | 
 | 1431 |  * | 
 | 1432 |  * requires cfs_b->lock | 
 | 1433 |  */ | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1434 | void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1435 | { | 
 | 1436 | 	u64 now; | 
 | 1437 |  | 
 | 1438 | 	if (cfs_b->quota == RUNTIME_INF) | 
 | 1439 | 		return; | 
 | 1440 |  | 
 | 1441 | 	now = sched_clock_cpu(smp_processor_id()); | 
 | 1442 | 	cfs_b->runtime = cfs_b->quota; | 
 | 1443 | 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); | 
 | 1444 | } | 
 | 1445 |  | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1446 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) | 
 | 1447 | { | 
 | 1448 | 	return &tg->cfs_bandwidth; | 
 | 1449 | } | 
 | 1450 |  | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1451 | /* returns 0 on failure to allocate runtime */ | 
 | 1452 | static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1453 | { | 
 | 1454 | 	struct task_group *tg = cfs_rq->tg; | 
 | 1455 | 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1456 | 	u64 amount = 0, min_amount, expires; | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1457 |  | 
 | 1458 | 	/* note: this is a positive sum as runtime_remaining <= 0 */ | 
 | 1459 | 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; | 
 | 1460 |  | 
 | 1461 | 	raw_spin_lock(&cfs_b->lock); | 
 | 1462 | 	if (cfs_b->quota == RUNTIME_INF) | 
 | 1463 | 		amount = min_amount; | 
| Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1464 | 	else { | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1465 | 		/* | 
 | 1466 | 		 * If the bandwidth pool has become inactive, then at least one | 
 | 1467 | 		 * period must have elapsed since the last consumption. | 
 | 1468 | 		 * Refresh the global state and ensure bandwidth timer becomes | 
 | 1469 | 		 * active. | 
 | 1470 | 		 */ | 
 | 1471 | 		if (!cfs_b->timer_active) { | 
 | 1472 | 			__refill_cfs_bandwidth_runtime(cfs_b); | 
| Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1473 | 			__start_cfs_bandwidth(cfs_b); | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1474 | 		} | 
| Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1475 |  | 
 | 1476 | 		if (cfs_b->runtime > 0) { | 
 | 1477 | 			amount = min(cfs_b->runtime, min_amount); | 
 | 1478 | 			cfs_b->runtime -= amount; | 
 | 1479 | 			cfs_b->idle = 0; | 
 | 1480 | 		} | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1481 | 	} | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1482 | 	expires = cfs_b->runtime_expires; | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1483 | 	raw_spin_unlock(&cfs_b->lock); | 
 | 1484 |  | 
 | 1485 | 	cfs_rq->runtime_remaining += amount; | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1486 | 	/* | 
 | 1487 | 	 * we may have advanced our local expiration to account for allowed | 
 | 1488 | 	 * spread between our sched_clock and the one on which runtime was | 
 | 1489 | 	 * issued. | 
 | 1490 | 	 */ | 
 | 1491 | 	if ((s64)(expires - cfs_rq->runtime_expires) > 0) | 
 | 1492 | 		cfs_rq->runtime_expires = expires; | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1493 |  | 
 | 1494 | 	return cfs_rq->runtime_remaining > 0; | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1495 | } | 
 | 1496 |  | 
 | 1497 | /* | 
 | 1498 |  * Note: This depends on the synchronization provided by sched_clock and the | 
 | 1499 |  * fact that rq->clock snapshots this value. | 
 | 1500 |  */ | 
 | 1501 | static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) | 
 | 1502 | { | 
 | 1503 | 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | 
 | 1504 | 	struct rq *rq = rq_of(cfs_rq); | 
 | 1505 |  | 
 | 1506 | 	/* if the deadline is ahead of our clock, nothing to do */ | 
 | 1507 | 	if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0)) | 
 | 1508 | 		return; | 
 | 1509 |  | 
 | 1510 | 	if (cfs_rq->runtime_remaining < 0) | 
 | 1511 | 		return; | 
 | 1512 |  | 
 | 1513 | 	/* | 
 | 1514 | 	 * If the local deadline has passed we have to consider the | 
 | 1515 | 	 * possibility that our sched_clock is 'fast' and the global deadline | 
 | 1516 | 	 * has not truly expired. | 
 | 1517 | 	 * | 
 | 1518 | 	 * Fortunately we can check determine whether this the case by checking | 
 | 1519 | 	 * whether the global deadline has advanced. | 
 | 1520 | 	 */ | 
 | 1521 |  | 
 | 1522 | 	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) { | 
 | 1523 | 		/* extend local deadline, drift is bounded above by 2 ticks */ | 
 | 1524 | 		cfs_rq->runtime_expires += TICK_NSEC; | 
 | 1525 | 	} else { | 
 | 1526 | 		/* global deadline is ahead, expiration has passed */ | 
 | 1527 | 		cfs_rq->runtime_remaining = 0; | 
 | 1528 | 	} | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1529 | } | 
 | 1530 |  | 
 | 1531 | static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | 
 | 1532 | 				     unsigned long delta_exec) | 
 | 1533 | { | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1534 | 	/* dock delta_exec before expiring quota (as it could span periods) */ | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1535 | 	cfs_rq->runtime_remaining -= delta_exec; | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1536 | 	expire_cfs_rq_runtime(cfs_rq); | 
 | 1537 |  | 
 | 1538 | 	if (likely(cfs_rq->runtime_remaining > 0)) | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1539 | 		return; | 
 | 1540 |  | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1541 | 	/* | 
 | 1542 | 	 * if we're unable to extend our runtime we resched so that the active | 
 | 1543 | 	 * hierarchy can be throttled | 
 | 1544 | 	 */ | 
 | 1545 | 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) | 
 | 1546 | 		resched_task(rq_of(cfs_rq)->curr); | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1547 | } | 
 | 1548 |  | 
 | 1549 | static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | 
 | 1550 | 						   unsigned long delta_exec) | 
 | 1551 | { | 
| Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1552 | 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1553 | 		return; | 
 | 1554 |  | 
 | 1555 | 	__account_cfs_rq_runtime(cfs_rq, delta_exec); | 
 | 1556 | } | 
 | 1557 |  | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1558 | static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) | 
 | 1559 | { | 
| Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1560 | 	return cfs_bandwidth_used() && cfs_rq->throttled; | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1561 | } | 
 | 1562 |  | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 1563 | /* check whether cfs_rq, or any parent, is throttled */ | 
 | 1564 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) | 
 | 1565 | { | 
| Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1566 | 	return cfs_bandwidth_used() && cfs_rq->throttle_count; | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 1567 | } | 
 | 1568 |  | 
 | 1569 | /* | 
 | 1570 |  * Ensure that neither of the group entities corresponding to src_cpu or | 
 | 1571 |  * dest_cpu are members of a throttled hierarchy when performing group | 
 | 1572 |  * load-balance operations. | 
 | 1573 |  */ | 
 | 1574 | static inline int throttled_lb_pair(struct task_group *tg, | 
 | 1575 | 				    int src_cpu, int dest_cpu) | 
 | 1576 | { | 
 | 1577 | 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq; | 
 | 1578 |  | 
 | 1579 | 	src_cfs_rq = tg->cfs_rq[src_cpu]; | 
 | 1580 | 	dest_cfs_rq = tg->cfs_rq[dest_cpu]; | 
 | 1581 |  | 
 | 1582 | 	return throttled_hierarchy(src_cfs_rq) || | 
 | 1583 | 	       throttled_hierarchy(dest_cfs_rq); | 
 | 1584 | } | 
 | 1585 |  | 
 | 1586 | /* updated child weight may affect parent so we have to do this bottom up */ | 
 | 1587 | static int tg_unthrottle_up(struct task_group *tg, void *data) | 
 | 1588 | { | 
 | 1589 | 	struct rq *rq = data; | 
 | 1590 | 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; | 
 | 1591 |  | 
 | 1592 | 	cfs_rq->throttle_count--; | 
 | 1593 | #ifdef CONFIG_SMP | 
 | 1594 | 	if (!cfs_rq->throttle_count) { | 
 | 1595 | 		u64 delta = rq->clock_task - cfs_rq->load_stamp; | 
 | 1596 |  | 
 | 1597 | 		/* leaving throttled state, advance shares averaging windows */ | 
 | 1598 | 		cfs_rq->load_stamp += delta; | 
 | 1599 | 		cfs_rq->load_last += delta; | 
 | 1600 |  | 
 | 1601 | 		/* update entity weight now that we are on_rq again */ | 
 | 1602 | 		update_cfs_shares(cfs_rq); | 
 | 1603 | 	} | 
 | 1604 | #endif | 
 | 1605 |  | 
 | 1606 | 	return 0; | 
 | 1607 | } | 
 | 1608 |  | 
 | 1609 | static int tg_throttle_down(struct task_group *tg, void *data) | 
 | 1610 | { | 
 | 1611 | 	struct rq *rq = data; | 
 | 1612 | 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; | 
 | 1613 |  | 
 | 1614 | 	/* group is entering throttled state, record last load */ | 
 | 1615 | 	if (!cfs_rq->throttle_count) | 
 | 1616 | 		update_cfs_load(cfs_rq, 0); | 
 | 1617 | 	cfs_rq->throttle_count++; | 
 | 1618 |  | 
 | 1619 | 	return 0; | 
 | 1620 | } | 
 | 1621 |  | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1622 | static void throttle_cfs_rq(struct cfs_rq *cfs_rq) | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1623 | { | 
 | 1624 | 	struct rq *rq = rq_of(cfs_rq); | 
 | 1625 | 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | 
 | 1626 | 	struct sched_entity *se; | 
 | 1627 | 	long task_delta, dequeue = 1; | 
 | 1628 |  | 
 | 1629 | 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; | 
 | 1630 |  | 
 | 1631 | 	/* account load preceding throttle */ | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 1632 | 	rcu_read_lock(); | 
 | 1633 | 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); | 
 | 1634 | 	rcu_read_unlock(); | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1635 |  | 
 | 1636 | 	task_delta = cfs_rq->h_nr_running; | 
 | 1637 | 	for_each_sched_entity(se) { | 
 | 1638 | 		struct cfs_rq *qcfs_rq = cfs_rq_of(se); | 
 | 1639 | 		/* throttled entity or throttle-on-deactivate */ | 
 | 1640 | 		if (!se->on_rq) | 
 | 1641 | 			break; | 
 | 1642 |  | 
 | 1643 | 		if (dequeue) | 
 | 1644 | 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); | 
 | 1645 | 		qcfs_rq->h_nr_running -= task_delta; | 
 | 1646 |  | 
 | 1647 | 		if (qcfs_rq->load.weight) | 
 | 1648 | 			dequeue = 0; | 
 | 1649 | 	} | 
 | 1650 |  | 
 | 1651 | 	if (!se) | 
 | 1652 | 		rq->nr_running -= task_delta; | 
 | 1653 |  | 
 | 1654 | 	cfs_rq->throttled = 1; | 
| Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 1655 | 	cfs_rq->throttled_timestamp = rq->clock; | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1656 | 	raw_spin_lock(&cfs_b->lock); | 
 | 1657 | 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); | 
 | 1658 | 	raw_spin_unlock(&cfs_b->lock); | 
 | 1659 | } | 
 | 1660 |  | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1661 | void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1662 | { | 
 | 1663 | 	struct rq *rq = rq_of(cfs_rq); | 
 | 1664 | 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | 
 | 1665 | 	struct sched_entity *se; | 
 | 1666 | 	int enqueue = 1; | 
 | 1667 | 	long task_delta; | 
 | 1668 |  | 
 | 1669 | 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; | 
 | 1670 |  | 
 | 1671 | 	cfs_rq->throttled = 0; | 
 | 1672 | 	raw_spin_lock(&cfs_b->lock); | 
| Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 1673 | 	cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp; | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1674 | 	list_del_rcu(&cfs_rq->throttled_list); | 
 | 1675 | 	raw_spin_unlock(&cfs_b->lock); | 
| Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 1676 | 	cfs_rq->throttled_timestamp = 0; | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1677 |  | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 1678 | 	update_rq_clock(rq); | 
 | 1679 | 	/* update hierarchical throttle state */ | 
 | 1680 | 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); | 
 | 1681 |  | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1682 | 	if (!cfs_rq->load.weight) | 
 | 1683 | 		return; | 
 | 1684 |  | 
 | 1685 | 	task_delta = cfs_rq->h_nr_running; | 
 | 1686 | 	for_each_sched_entity(se) { | 
 | 1687 | 		if (se->on_rq) | 
 | 1688 | 			enqueue = 0; | 
 | 1689 |  | 
 | 1690 | 		cfs_rq = cfs_rq_of(se); | 
 | 1691 | 		if (enqueue) | 
 | 1692 | 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); | 
 | 1693 | 		cfs_rq->h_nr_running += task_delta; | 
 | 1694 |  | 
 | 1695 | 		if (cfs_rq_throttled(cfs_rq)) | 
 | 1696 | 			break; | 
 | 1697 | 	} | 
 | 1698 |  | 
 | 1699 | 	if (!se) | 
 | 1700 | 		rq->nr_running += task_delta; | 
 | 1701 |  | 
 | 1702 | 	/* determine whether we need to wake up potentially idle cpu */ | 
 | 1703 | 	if (rq->curr == rq->idle && rq->cfs.nr_running) | 
 | 1704 | 		resched_task(rq->curr); | 
 | 1705 | } | 
 | 1706 |  | 
 | 1707 | static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, | 
 | 1708 | 		u64 remaining, u64 expires) | 
 | 1709 | { | 
 | 1710 | 	struct cfs_rq *cfs_rq; | 
 | 1711 | 	u64 runtime = remaining; | 
 | 1712 |  | 
 | 1713 | 	rcu_read_lock(); | 
 | 1714 | 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, | 
 | 1715 | 				throttled_list) { | 
 | 1716 | 		struct rq *rq = rq_of(cfs_rq); | 
 | 1717 |  | 
 | 1718 | 		raw_spin_lock(&rq->lock); | 
 | 1719 | 		if (!cfs_rq_throttled(cfs_rq)) | 
 | 1720 | 			goto next; | 
 | 1721 |  | 
 | 1722 | 		runtime = -cfs_rq->runtime_remaining + 1; | 
 | 1723 | 		if (runtime > remaining) | 
 | 1724 | 			runtime = remaining; | 
 | 1725 | 		remaining -= runtime; | 
 | 1726 |  | 
 | 1727 | 		cfs_rq->runtime_remaining += runtime; | 
 | 1728 | 		cfs_rq->runtime_expires = expires; | 
 | 1729 |  | 
 | 1730 | 		/* we check whether we're throttled above */ | 
 | 1731 | 		if (cfs_rq->runtime_remaining > 0) | 
 | 1732 | 			unthrottle_cfs_rq(cfs_rq); | 
 | 1733 |  | 
 | 1734 | next: | 
 | 1735 | 		raw_spin_unlock(&rq->lock); | 
 | 1736 |  | 
 | 1737 | 		if (!remaining) | 
 | 1738 | 			break; | 
 | 1739 | 	} | 
 | 1740 | 	rcu_read_unlock(); | 
 | 1741 |  | 
 | 1742 | 	return remaining; | 
 | 1743 | } | 
 | 1744 |  | 
| Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1745 | /* | 
 | 1746 |  * Responsible for refilling a task_group's bandwidth and unthrottling its | 
 | 1747 |  * cfs_rqs as appropriate. If there has been no activity within the last | 
 | 1748 |  * period the timer is deactivated until scheduling resumes; cfs_b->idle is | 
 | 1749 |  * used to track this state. | 
 | 1750 |  */ | 
 | 1751 | static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) | 
 | 1752 | { | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1753 | 	u64 runtime, runtime_expires; | 
 | 1754 | 	int idle = 1, throttled; | 
| Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1755 |  | 
 | 1756 | 	raw_spin_lock(&cfs_b->lock); | 
 | 1757 | 	/* no need to continue the timer with no bandwidth constraint */ | 
 | 1758 | 	if (cfs_b->quota == RUNTIME_INF) | 
 | 1759 | 		goto out_unlock; | 
 | 1760 |  | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1761 | 	throttled = !list_empty(&cfs_b->throttled_cfs_rq); | 
 | 1762 | 	/* idle depends on !throttled (for the case of a large deficit) */ | 
 | 1763 | 	idle = cfs_b->idle && !throttled; | 
| Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 1764 | 	cfs_b->nr_periods += overrun; | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1765 |  | 
| Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1766 | 	/* if we're going inactive then everything else can be deferred */ | 
 | 1767 | 	if (idle) | 
 | 1768 | 		goto out_unlock; | 
 | 1769 |  | 
 | 1770 | 	__refill_cfs_bandwidth_runtime(cfs_b); | 
 | 1771 |  | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1772 | 	if (!throttled) { | 
 | 1773 | 		/* mark as potentially idle for the upcoming period */ | 
 | 1774 | 		cfs_b->idle = 1; | 
 | 1775 | 		goto out_unlock; | 
 | 1776 | 	} | 
| Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1777 |  | 
| Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 1778 | 	/* account preceding periods in which throttling occurred */ | 
 | 1779 | 	cfs_b->nr_throttled += overrun; | 
 | 1780 |  | 
| Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1781 | 	/* | 
 | 1782 | 	 * There are throttled entities so we must first use the new bandwidth | 
 | 1783 | 	 * to unthrottle them before making it generally available.  This | 
 | 1784 | 	 * ensures that all existing debts will be paid before a new cfs_rq is | 
 | 1785 | 	 * allowed to run. | 
 | 1786 | 	 */ | 
 | 1787 | 	runtime = cfs_b->runtime; | 
 | 1788 | 	runtime_expires = cfs_b->runtime_expires; | 
 | 1789 | 	cfs_b->runtime = 0; | 
 | 1790 |  | 
 | 1791 | 	/* | 
 | 1792 | 	 * This check is repeated as we are holding onto the new bandwidth | 
 | 1793 | 	 * while we unthrottle.  This can potentially race with an unthrottled | 
 | 1794 | 	 * group trying to acquire new bandwidth from the global pool. | 
 | 1795 | 	 */ | 
 | 1796 | 	while (throttled && runtime > 0) { | 
 | 1797 | 		raw_spin_unlock(&cfs_b->lock); | 
 | 1798 | 		/* we can't nest cfs_b->lock while distributing bandwidth */ | 
 | 1799 | 		runtime = distribute_cfs_runtime(cfs_b, runtime, | 
 | 1800 | 						 runtime_expires); | 
 | 1801 | 		raw_spin_lock(&cfs_b->lock); | 
 | 1802 |  | 
 | 1803 | 		throttled = !list_empty(&cfs_b->throttled_cfs_rq); | 
 | 1804 | 	} | 
 | 1805 |  | 
 | 1806 | 	/* return (any) remaining runtime */ | 
 | 1807 | 	cfs_b->runtime = runtime; | 
 | 1808 | 	/* | 
 | 1809 | 	 * While we are ensured activity in the period following an | 
 | 1810 | 	 * unthrottle, this also covers the case in which the new bandwidth is | 
 | 1811 | 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the | 
 | 1812 | 	 * timer to remain active while there are any throttled entities.) | 
 | 1813 | 	 */ | 
 | 1814 | 	cfs_b->idle = 0; | 
| Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1815 | out_unlock: | 
 | 1816 | 	if (idle) | 
 | 1817 | 		cfs_b->timer_active = 0; | 
 | 1818 | 	raw_spin_unlock(&cfs_b->lock); | 
 | 1819 |  | 
 | 1820 | 	return idle; | 
 | 1821 | } | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1822 |  | 
| Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 1823 | /* a cfs_rq won't donate quota below this amount */ | 
 | 1824 | static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; | 
 | 1825 | /* minimum remaining period time to redistribute slack quota */ | 
 | 1826 | static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; | 
 | 1827 | /* how long we wait to gather additional slack before distributing */ | 
 | 1828 | static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; | 
 | 1829 |  | 
 | 1830 | /* are we near the end of the current quota period? */ | 
 | 1831 | static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) | 
 | 1832 | { | 
 | 1833 | 	struct hrtimer *refresh_timer = &cfs_b->period_timer; | 
 | 1834 | 	u64 remaining; | 
 | 1835 |  | 
 | 1836 | 	/* if the call-back is running a quota refresh is already occurring */ | 
 | 1837 | 	if (hrtimer_callback_running(refresh_timer)) | 
 | 1838 | 		return 1; | 
 | 1839 |  | 
 | 1840 | 	/* is a quota refresh about to occur? */ | 
 | 1841 | 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); | 
 | 1842 | 	if (remaining < min_expire) | 
 | 1843 | 		return 1; | 
 | 1844 |  | 
 | 1845 | 	return 0; | 
 | 1846 | } | 
 | 1847 |  | 
 | 1848 | static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) | 
 | 1849 | { | 
 | 1850 | 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; | 
 | 1851 |  | 
 | 1852 | 	/* if there's a quota refresh soon don't bother with slack */ | 
 | 1853 | 	if (runtime_refresh_within(cfs_b, min_left)) | 
 | 1854 | 		return; | 
 | 1855 |  | 
 | 1856 | 	start_bandwidth_timer(&cfs_b->slack_timer, | 
 | 1857 | 				ns_to_ktime(cfs_bandwidth_slack_period)); | 
 | 1858 | } | 
 | 1859 |  | 
 | 1860 | /* we know any runtime found here is valid as update_curr() precedes return */ | 
 | 1861 | static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) | 
 | 1862 | { | 
 | 1863 | 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | 
 | 1864 | 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; | 
 | 1865 |  | 
 | 1866 | 	if (slack_runtime <= 0) | 
 | 1867 | 		return; | 
 | 1868 |  | 
 | 1869 | 	raw_spin_lock(&cfs_b->lock); | 
 | 1870 | 	if (cfs_b->quota != RUNTIME_INF && | 
 | 1871 | 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) { | 
 | 1872 | 		cfs_b->runtime += slack_runtime; | 
 | 1873 |  | 
 | 1874 | 		/* we are under rq->lock, defer unthrottling using a timer */ | 
 | 1875 | 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() && | 
 | 1876 | 		    !list_empty(&cfs_b->throttled_cfs_rq)) | 
 | 1877 | 			start_cfs_slack_bandwidth(cfs_b); | 
 | 1878 | 	} | 
 | 1879 | 	raw_spin_unlock(&cfs_b->lock); | 
 | 1880 |  | 
 | 1881 | 	/* even if it's not valid for return we don't want to try again */ | 
 | 1882 | 	cfs_rq->runtime_remaining -= slack_runtime; | 
 | 1883 | } | 
 | 1884 |  | 
 | 1885 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) | 
 | 1886 | { | 
| Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1887 | 	if (!cfs_bandwidth_used()) | 
 | 1888 | 		return; | 
 | 1889 |  | 
| Paul Turner | fccfdc6 | 2011-11-07 20:26:34 -0800 | [diff] [blame] | 1890 | 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) | 
| Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 1891 | 		return; | 
 | 1892 |  | 
 | 1893 | 	__return_cfs_rq_runtime(cfs_rq); | 
 | 1894 | } | 
 | 1895 |  | 
 | 1896 | /* | 
 | 1897 |  * This is done with a timer (instead of inline with bandwidth return) since | 
 | 1898 |  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. | 
 | 1899 |  */ | 
 | 1900 | static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) | 
 | 1901 | { | 
 | 1902 | 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); | 
 | 1903 | 	u64 expires; | 
 | 1904 |  | 
 | 1905 | 	/* confirm we're still not at a refresh boundary */ | 
 | 1906 | 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) | 
 | 1907 | 		return; | 
 | 1908 |  | 
 | 1909 | 	raw_spin_lock(&cfs_b->lock); | 
 | 1910 | 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) { | 
 | 1911 | 		runtime = cfs_b->runtime; | 
 | 1912 | 		cfs_b->runtime = 0; | 
 | 1913 | 	} | 
 | 1914 | 	expires = cfs_b->runtime_expires; | 
 | 1915 | 	raw_spin_unlock(&cfs_b->lock); | 
 | 1916 |  | 
 | 1917 | 	if (!runtime) | 
 | 1918 | 		return; | 
 | 1919 |  | 
 | 1920 | 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires); | 
 | 1921 |  | 
 | 1922 | 	raw_spin_lock(&cfs_b->lock); | 
 | 1923 | 	if (expires == cfs_b->runtime_expires) | 
 | 1924 | 		cfs_b->runtime = runtime; | 
 | 1925 | 	raw_spin_unlock(&cfs_b->lock); | 
 | 1926 | } | 
 | 1927 |  | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1928 | /* | 
 | 1929 |  * When a group wakes up we want to make sure that its quota is not already | 
 | 1930 |  * expired/exceeded, otherwise it may be allowed to steal additional ticks of | 
 | 1931 |  * runtime as update_curr() throttling can not not trigger until it's on-rq. | 
 | 1932 |  */ | 
 | 1933 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) | 
 | 1934 | { | 
| Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1935 | 	if (!cfs_bandwidth_used()) | 
 | 1936 | 		return; | 
 | 1937 |  | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1938 | 	/* an active group must be handled by the update_curr()->put() path */ | 
 | 1939 | 	if (!cfs_rq->runtime_enabled || cfs_rq->curr) | 
 | 1940 | 		return; | 
 | 1941 |  | 
 | 1942 | 	/* ensure the group is not already throttled */ | 
 | 1943 | 	if (cfs_rq_throttled(cfs_rq)) | 
 | 1944 | 		return; | 
 | 1945 |  | 
 | 1946 | 	/* update runtime allocation */ | 
 | 1947 | 	account_cfs_rq_runtime(cfs_rq, 0); | 
 | 1948 | 	if (cfs_rq->runtime_remaining <= 0) | 
 | 1949 | 		throttle_cfs_rq(cfs_rq); | 
 | 1950 | } | 
 | 1951 |  | 
 | 1952 | /* conditionally throttle active cfs_rq's from put_prev_entity() */ | 
 | 1953 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) | 
 | 1954 | { | 
| Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1955 | 	if (!cfs_bandwidth_used()) | 
 | 1956 | 		return; | 
 | 1957 |  | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1958 | 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) | 
 | 1959 | 		return; | 
 | 1960 |  | 
 | 1961 | 	/* | 
 | 1962 | 	 * it's possible for a throttled entity to be forced into a running | 
 | 1963 | 	 * state (e.g. set_curr_task), in this case we're finished. | 
 | 1964 | 	 */ | 
 | 1965 | 	if (cfs_rq_throttled(cfs_rq)) | 
 | 1966 | 		return; | 
 | 1967 |  | 
 | 1968 | 	throttle_cfs_rq(cfs_rq); | 
 | 1969 | } | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1970 |  | 
 | 1971 | static inline u64 default_cfs_period(void); | 
 | 1972 | static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun); | 
 | 1973 | static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b); | 
 | 1974 |  | 
 | 1975 | static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) | 
 | 1976 | { | 
 | 1977 | 	struct cfs_bandwidth *cfs_b = | 
 | 1978 | 		container_of(timer, struct cfs_bandwidth, slack_timer); | 
 | 1979 | 	do_sched_cfs_slack_timer(cfs_b); | 
 | 1980 |  | 
 | 1981 | 	return HRTIMER_NORESTART; | 
 | 1982 | } | 
 | 1983 |  | 
 | 1984 | static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) | 
 | 1985 | { | 
 | 1986 | 	struct cfs_bandwidth *cfs_b = | 
 | 1987 | 		container_of(timer, struct cfs_bandwidth, period_timer); | 
 | 1988 | 	ktime_t now; | 
 | 1989 | 	int overrun; | 
 | 1990 | 	int idle = 0; | 
 | 1991 |  | 
 | 1992 | 	for (;;) { | 
 | 1993 | 		now = hrtimer_cb_get_time(timer); | 
 | 1994 | 		overrun = hrtimer_forward(timer, now, cfs_b->period); | 
 | 1995 |  | 
 | 1996 | 		if (!overrun) | 
 | 1997 | 			break; | 
 | 1998 |  | 
 | 1999 | 		idle = do_sched_cfs_period_timer(cfs_b, overrun); | 
 | 2000 | 	} | 
 | 2001 |  | 
 | 2002 | 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; | 
 | 2003 | } | 
 | 2004 |  | 
 | 2005 | void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | 
 | 2006 | { | 
 | 2007 | 	raw_spin_lock_init(&cfs_b->lock); | 
 | 2008 | 	cfs_b->runtime = 0; | 
 | 2009 | 	cfs_b->quota = RUNTIME_INF; | 
 | 2010 | 	cfs_b->period = ns_to_ktime(default_cfs_period()); | 
 | 2011 |  | 
 | 2012 | 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); | 
 | 2013 | 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 
 | 2014 | 	cfs_b->period_timer.function = sched_cfs_period_timer; | 
 | 2015 | 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 
 | 2016 | 	cfs_b->slack_timer.function = sched_cfs_slack_timer; | 
 | 2017 | } | 
 | 2018 |  | 
 | 2019 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) | 
 | 2020 | { | 
 | 2021 | 	cfs_rq->runtime_enabled = 0; | 
 | 2022 | 	INIT_LIST_HEAD(&cfs_rq->throttled_list); | 
 | 2023 | } | 
 | 2024 |  | 
 | 2025 | /* requires cfs_b->lock, may release to reprogram timer */ | 
 | 2026 | void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | 
 | 2027 | { | 
 | 2028 | 	/* | 
 | 2029 | 	 * The timer may be active because we're trying to set a new bandwidth | 
 | 2030 | 	 * period or because we're racing with the tear-down path | 
 | 2031 | 	 * (timer_active==0 becomes visible before the hrtimer call-back | 
 | 2032 | 	 * terminates).  In either case we ensure that it's re-programmed | 
 | 2033 | 	 */ | 
 | 2034 | 	while (unlikely(hrtimer_active(&cfs_b->period_timer))) { | 
 | 2035 | 		raw_spin_unlock(&cfs_b->lock); | 
 | 2036 | 		/* ensure cfs_b->lock is available while we wait */ | 
 | 2037 | 		hrtimer_cancel(&cfs_b->period_timer); | 
 | 2038 |  | 
 | 2039 | 		raw_spin_lock(&cfs_b->lock); | 
 | 2040 | 		/* if someone else restarted the timer then we're done */ | 
 | 2041 | 		if (cfs_b->timer_active) | 
 | 2042 | 			return; | 
 | 2043 | 	} | 
 | 2044 |  | 
 | 2045 | 	cfs_b->timer_active = 1; | 
 | 2046 | 	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period); | 
 | 2047 | } | 
 | 2048 |  | 
 | 2049 | static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | 
 | 2050 | { | 
 | 2051 | 	hrtimer_cancel(&cfs_b->period_timer); | 
 | 2052 | 	hrtimer_cancel(&cfs_b->slack_timer); | 
 | 2053 | } | 
 | 2054 |  | 
 | 2055 | void unthrottle_offline_cfs_rqs(struct rq *rq) | 
 | 2056 | { | 
 | 2057 | 	struct cfs_rq *cfs_rq; | 
 | 2058 |  | 
 | 2059 | 	for_each_leaf_cfs_rq(rq, cfs_rq) { | 
 | 2060 | 		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | 
 | 2061 |  | 
 | 2062 | 		if (!cfs_rq->runtime_enabled) | 
 | 2063 | 			continue; | 
 | 2064 |  | 
 | 2065 | 		/* | 
 | 2066 | 		 * clock_task is not advancing so we just need to make sure | 
 | 2067 | 		 * there's some valid quota amount | 
 | 2068 | 		 */ | 
 | 2069 | 		cfs_rq->runtime_remaining = cfs_b->quota; | 
 | 2070 | 		if (cfs_rq_throttled(cfs_rq)) | 
 | 2071 | 			unthrottle_cfs_rq(cfs_rq); | 
 | 2072 | 	} | 
 | 2073 | } | 
 | 2074 |  | 
 | 2075 | #else /* CONFIG_CFS_BANDWIDTH */ | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 2076 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | 
 | 2077 | 				     unsigned long delta_exec) {} | 
| Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 2078 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 
 | 2079 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} | 
| Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 2080 | static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2081 |  | 
 | 2082 | static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) | 
 | 2083 | { | 
 | 2084 | 	return 0; | 
 | 2085 | } | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 2086 |  | 
 | 2087 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) | 
 | 2088 | { | 
 | 2089 | 	return 0; | 
 | 2090 | } | 
 | 2091 |  | 
 | 2092 | static inline int throttled_lb_pair(struct task_group *tg, | 
 | 2093 | 				    int src_cpu, int dest_cpu) | 
 | 2094 | { | 
 | 2095 | 	return 0; | 
 | 2096 | } | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2097 |  | 
 | 2098 | void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} | 
 | 2099 |  | 
 | 2100 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 2101 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 
| Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 2102 | #endif | 
 | 2103 |  | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2104 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) | 
 | 2105 | { | 
 | 2106 | 	return NULL; | 
 | 2107 | } | 
 | 2108 | static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} | 
 | 2109 | void unthrottle_offline_cfs_rqs(struct rq *rq) {} | 
 | 2110 |  | 
 | 2111 | #endif /* CONFIG_CFS_BANDWIDTH */ | 
 | 2112 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2113 | /************************************************** | 
 | 2114 |  * CFS operations on tasks: | 
 | 2115 |  */ | 
 | 2116 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2117 | #ifdef CONFIG_SCHED_HRTICK | 
 | 2118 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | 
 | 2119 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2120 | 	struct sched_entity *se = &p->se; | 
 | 2121 | 	struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
 | 2122 |  | 
 | 2123 | 	WARN_ON(task_rq(p) != rq); | 
 | 2124 |  | 
| Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 2125 | 	if (cfs_rq->nr_running > 1) { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2126 | 		u64 slice = sched_slice(cfs_rq, se); | 
 | 2127 | 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; | 
 | 2128 | 		s64 delta = slice - ran; | 
 | 2129 |  | 
 | 2130 | 		if (delta < 0) { | 
 | 2131 | 			if (rq->curr == p) | 
 | 2132 | 				resched_task(p); | 
 | 2133 | 			return; | 
 | 2134 | 		} | 
 | 2135 |  | 
 | 2136 | 		/* | 
 | 2137 | 		 * Don't schedule slices shorter than 10000ns, that just | 
 | 2138 | 		 * doesn't make sense. Rely on vruntime for fairness. | 
 | 2139 | 		 */ | 
| Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 2140 | 		if (rq->curr != p) | 
| Peter Zijlstra | 157124c | 2008-07-28 11:53:11 +0200 | [diff] [blame] | 2141 | 			delta = max_t(s64, 10000LL, delta); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2142 |  | 
| Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 2143 | 		hrtick_start(rq, delta); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2144 | 	} | 
 | 2145 | } | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2146 |  | 
 | 2147 | /* | 
 | 2148 |  * called from enqueue/dequeue and updates the hrtick when the | 
 | 2149 |  * current task is from our class and nr_running is low enough | 
 | 2150 |  * to matter. | 
 | 2151 |  */ | 
 | 2152 | static void hrtick_update(struct rq *rq) | 
 | 2153 | { | 
 | 2154 | 	struct task_struct *curr = rq->curr; | 
 | 2155 |  | 
| Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 2156 | 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2157 | 		return; | 
 | 2158 |  | 
 | 2159 | 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) | 
 | 2160 | 		hrtick_start_fair(rq, curr); | 
 | 2161 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 2162 | #else /* !CONFIG_SCHED_HRTICK */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2163 | static inline void | 
 | 2164 | hrtick_start_fair(struct rq *rq, struct task_struct *p) | 
 | 2165 | { | 
 | 2166 | } | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2167 |  | 
 | 2168 | static inline void hrtick_update(struct rq *rq) | 
 | 2169 | { | 
 | 2170 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2171 | #endif | 
 | 2172 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2173 | /* | 
 | 2174 |  * The enqueue_task method is called before nr_running is | 
 | 2175 |  * increased. Here we update the fair scheduling stats and | 
 | 2176 |  * then put the task into the rbtree: | 
 | 2177 |  */ | 
| Thomas Gleixner | ea87bb7 | 2010-01-20 20:58:57 +0000 | [diff] [blame] | 2178 | static void | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2179 | enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2180 | { | 
 | 2181 | 	struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 2182 | 	struct sched_entity *se = &p->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2183 |  | 
 | 2184 | 	for_each_sched_entity(se) { | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 2185 | 		if (se->on_rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2186 | 			break; | 
 | 2187 | 		cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2188 | 		enqueue_entity(cfs_rq, se, flags); | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2189 |  | 
 | 2190 | 		/* | 
 | 2191 | 		 * end evaluation on encountering a throttled cfs_rq | 
 | 2192 | 		 * | 
 | 2193 | 		 * note: in the case of encountering a throttled cfs_rq we will | 
 | 2194 | 		 * post the final h_nr_running increment below. | 
 | 2195 | 		*/ | 
 | 2196 | 		if (cfs_rq_throttled(cfs_rq)) | 
 | 2197 | 			break; | 
| Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 2198 | 		cfs_rq->h_nr_running++; | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2199 |  | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2200 | 		flags = ENQUEUE_WAKEUP; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2201 | 	} | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2202 |  | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2203 | 	for_each_sched_entity(se) { | 
| Lin Ming | 0f31714 | 2011-07-22 09:14:31 +0800 | [diff] [blame] | 2204 | 		cfs_rq = cfs_rq_of(se); | 
| Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 2205 | 		cfs_rq->h_nr_running++; | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2206 |  | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2207 | 		if (cfs_rq_throttled(cfs_rq)) | 
 | 2208 | 			break; | 
 | 2209 |  | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 2210 | 		update_cfs_load(cfs_rq, 0); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 2211 | 		update_cfs_shares(cfs_rq); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2212 | 	} | 
 | 2213 |  | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2214 | 	if (!se) | 
 | 2215 | 		inc_nr_running(rq); | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2216 | 	hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2217 | } | 
 | 2218 |  | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2219 | static void set_next_buddy(struct sched_entity *se); | 
 | 2220 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2221 | /* | 
 | 2222 |  * The dequeue_task method is called before nr_running is | 
 | 2223 |  * decreased. We remove the task from the rbtree and | 
 | 2224 |  * update the fair scheduling stats: | 
 | 2225 |  */ | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2226 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2227 | { | 
 | 2228 | 	struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 2229 | 	struct sched_entity *se = &p->se; | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2230 | 	int task_sleep = flags & DEQUEUE_SLEEP; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2231 |  | 
 | 2232 | 	for_each_sched_entity(se) { | 
 | 2233 | 		cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2234 | 		dequeue_entity(cfs_rq, se, flags); | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2235 |  | 
 | 2236 | 		/* | 
 | 2237 | 		 * end evaluation on encountering a throttled cfs_rq | 
 | 2238 | 		 * | 
 | 2239 | 		 * note: in the case of encountering a throttled cfs_rq we will | 
 | 2240 | 		 * post the final h_nr_running decrement below. | 
 | 2241 | 		*/ | 
 | 2242 | 		if (cfs_rq_throttled(cfs_rq)) | 
 | 2243 | 			break; | 
| Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 2244 | 		cfs_rq->h_nr_running--; | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2245 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2246 | 		/* Don't dequeue parent if it has other entities besides us */ | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2247 | 		if (cfs_rq->load.weight) { | 
 | 2248 | 			/* | 
 | 2249 | 			 * Bias pick_next to pick a task from this cfs_rq, as | 
 | 2250 | 			 * p is sleeping when it is within its sched_slice. | 
 | 2251 | 			 */ | 
 | 2252 | 			if (task_sleep && parent_entity(se)) | 
 | 2253 | 				set_next_buddy(parent_entity(se)); | 
| Paul Turner | 9598c82 | 2011-07-06 22:30:37 -0700 | [diff] [blame] | 2254 |  | 
 | 2255 | 			/* avoid re-evaluating load for this entity */ | 
 | 2256 | 			se = parent_entity(se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2257 | 			break; | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2258 | 		} | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2259 | 		flags |= DEQUEUE_SLEEP; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2260 | 	} | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2261 |  | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2262 | 	for_each_sched_entity(se) { | 
| Lin Ming | 0f31714 | 2011-07-22 09:14:31 +0800 | [diff] [blame] | 2263 | 		cfs_rq = cfs_rq_of(se); | 
| Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 2264 | 		cfs_rq->h_nr_running--; | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2265 |  | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2266 | 		if (cfs_rq_throttled(cfs_rq)) | 
 | 2267 | 			break; | 
 | 2268 |  | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 2269 | 		update_cfs_load(cfs_rq, 0); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 2270 | 		update_cfs_shares(cfs_rq); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2271 | 	} | 
 | 2272 |  | 
| Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2273 | 	if (!se) | 
 | 2274 | 		dec_nr_running(rq); | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2275 | 	hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2276 | } | 
 | 2277 |  | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2278 | #ifdef CONFIG_SMP | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2279 | /* Used instead of source_load when we know the type == 0 */ | 
 | 2280 | static unsigned long weighted_cpuload(const int cpu) | 
 | 2281 | { | 
 | 2282 | 	return cpu_rq(cpu)->load.weight; | 
 | 2283 | } | 
 | 2284 |  | 
 | 2285 | /* | 
 | 2286 |  * Return a low guess at the load of a migration-source cpu weighted | 
 | 2287 |  * according to the scheduling class and "nice" value. | 
 | 2288 |  * | 
 | 2289 |  * We want to under-estimate the load of migration sources, to | 
 | 2290 |  * balance conservatively. | 
 | 2291 |  */ | 
 | 2292 | static unsigned long source_load(int cpu, int type) | 
 | 2293 | { | 
 | 2294 | 	struct rq *rq = cpu_rq(cpu); | 
 | 2295 | 	unsigned long total = weighted_cpuload(cpu); | 
 | 2296 |  | 
 | 2297 | 	if (type == 0 || !sched_feat(LB_BIAS)) | 
 | 2298 | 		return total; | 
 | 2299 |  | 
 | 2300 | 	return min(rq->cpu_load[type-1], total); | 
 | 2301 | } | 
 | 2302 |  | 
 | 2303 | /* | 
 | 2304 |  * Return a high guess at the load of a migration-target cpu weighted | 
 | 2305 |  * according to the scheduling class and "nice" value. | 
 | 2306 |  */ | 
 | 2307 | static unsigned long target_load(int cpu, int type) | 
 | 2308 | { | 
 | 2309 | 	struct rq *rq = cpu_rq(cpu); | 
 | 2310 | 	unsigned long total = weighted_cpuload(cpu); | 
 | 2311 |  | 
 | 2312 | 	if (type == 0 || !sched_feat(LB_BIAS)) | 
 | 2313 | 		return total; | 
 | 2314 |  | 
 | 2315 | 	return max(rq->cpu_load[type-1], total); | 
 | 2316 | } | 
 | 2317 |  | 
 | 2318 | static unsigned long power_of(int cpu) | 
 | 2319 | { | 
 | 2320 | 	return cpu_rq(cpu)->cpu_power; | 
 | 2321 | } | 
 | 2322 |  | 
 | 2323 | static unsigned long cpu_avg_load_per_task(int cpu) | 
 | 2324 | { | 
 | 2325 | 	struct rq *rq = cpu_rq(cpu); | 
 | 2326 | 	unsigned long nr_running = ACCESS_ONCE(rq->nr_running); | 
 | 2327 |  | 
 | 2328 | 	if (nr_running) | 
 | 2329 | 		return rq->load.weight / nr_running; | 
 | 2330 |  | 
 | 2331 | 	return 0; | 
 | 2332 | } | 
 | 2333 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2334 |  | 
| Peter Zijlstra | 74f8e4b | 2011-04-05 17:23:47 +0200 | [diff] [blame] | 2335 | static void task_waking_fair(struct task_struct *p) | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2336 | { | 
 | 2337 | 	struct sched_entity *se = &p->se; | 
 | 2338 | 	struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 2339 | 	u64 min_vruntime; | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2340 |  | 
| Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 2341 | #ifndef CONFIG_64BIT | 
 | 2342 | 	u64 min_vruntime_copy; | 
| Peter Zijlstra | 74f8e4b | 2011-04-05 17:23:47 +0200 | [diff] [blame] | 2343 |  | 
| Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 2344 | 	do { | 
 | 2345 | 		min_vruntime_copy = cfs_rq->min_vruntime_copy; | 
 | 2346 | 		smp_rmb(); | 
 | 2347 | 		min_vruntime = cfs_rq->min_vruntime; | 
 | 2348 | 	} while (min_vruntime != min_vruntime_copy); | 
 | 2349 | #else | 
 | 2350 | 	min_vruntime = cfs_rq->min_vruntime; | 
 | 2351 | #endif | 
 | 2352 |  | 
 | 2353 | 	se->vruntime -= min_vruntime; | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2354 | } | 
 | 2355 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2356 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 2357 | /* | 
 | 2358 |  * effective_load() calculates the load change as seen from the root_task_group | 
 | 2359 |  * | 
 | 2360 |  * Adding load to a group doesn't make a group heavier, but can cause movement | 
 | 2361 |  * of group shares between cpus. Assuming the shares were perfectly aligned one | 
 | 2362 |  * can calculate the shift in shares. | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2363 |  * | 
 | 2364 |  * Calculate the effective load difference if @wl is added (subtracted) to @tg | 
 | 2365 |  * on this @cpu and results in a total addition (subtraction) of @wg to the | 
 | 2366 |  * total group weight. | 
 | 2367 |  * | 
 | 2368 |  * Given a runqueue weight distribution (rw_i) we can compute a shares | 
 | 2369 |  * distribution (s_i) using: | 
 | 2370 |  * | 
 | 2371 |  *   s_i = rw_i / \Sum rw_j						(1) | 
 | 2372 |  * | 
 | 2373 |  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and | 
 | 2374 |  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting | 
 | 2375 |  * shares distribution (s_i): | 
 | 2376 |  * | 
 | 2377 |  *   rw_i = {   2,   4,   1,   0 } | 
 | 2378 |  *   s_i  = { 2/7, 4/7, 1/7,   0 } | 
 | 2379 |  * | 
 | 2380 |  * As per wake_affine() we're interested in the load of two CPUs (the CPU the | 
 | 2381 |  * task used to run on and the CPU the waker is running on), we need to | 
 | 2382 |  * compute the effect of waking a task on either CPU and, in case of a sync | 
 | 2383 |  * wakeup, compute the effect of the current task going to sleep. | 
 | 2384 |  * | 
 | 2385 |  * So for a change of @wl to the local @cpu with an overall group weight change | 
 | 2386 |  * of @wl we can compute the new shares distribution (s'_i) using: | 
 | 2387 |  * | 
 | 2388 |  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2) | 
 | 2389 |  * | 
 | 2390 |  * Suppose we're interested in CPUs 0 and 1, and want to compute the load | 
 | 2391 |  * differences in waking a task to CPU 0. The additional task changes the | 
 | 2392 |  * weight and shares distributions like: | 
 | 2393 |  * | 
 | 2394 |  *   rw'_i = {   3,   4,   1,   0 } | 
 | 2395 |  *   s'_i  = { 3/8, 4/8, 1/8,   0 } | 
 | 2396 |  * | 
 | 2397 |  * We can then compute the difference in effective weight by using: | 
 | 2398 |  * | 
 | 2399 |  *   dw_i = S * (s'_i - s_i)						(3) | 
 | 2400 |  * | 
 | 2401 |  * Where 'S' is the group weight as seen by its parent. | 
 | 2402 |  * | 
 | 2403 |  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7) | 
 | 2404 |  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 - | 
 | 2405 |  * 4/7) times the weight of the group. | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 2406 |  */ | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2407 | static long effective_load(struct task_group *tg, int cpu, long wl, long wg) | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2408 | { | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2409 | 	struct sched_entity *se = tg->se[cpu]; | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 2410 |  | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2411 | 	if (!tg->parent)	/* the trivial, non-cgroup case */ | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 2412 | 		return wl; | 
 | 2413 |  | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2414 | 	for_each_sched_entity(se) { | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2415 | 		long w, W; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2416 |  | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 2417 | 		tg = se->my_q->tg; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2418 |  | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2419 | 		/* | 
 | 2420 | 		 * W = @wg + \Sum rw_j | 
 | 2421 | 		 */ | 
 | 2422 | 		W = wg + calc_tg_weight(tg, se->my_q); | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2423 |  | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2424 | 		/* | 
 | 2425 | 		 * w = rw_i + @wl | 
 | 2426 | 		 */ | 
 | 2427 | 		w = se->my_q->load.weight + wl; | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 2428 |  | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2429 | 		/* | 
 | 2430 | 		 * wl = S * s'_i; see (2) | 
 | 2431 | 		 */ | 
 | 2432 | 		if (W > 0 && w < W) | 
 | 2433 | 			wl = (w * tg->shares) / W; | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 2434 | 		else | 
 | 2435 | 			wl = tg->shares; | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 2436 |  | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2437 | 		/* | 
 | 2438 | 		 * Per the above, wl is the new se->load.weight value; since | 
 | 2439 | 		 * those are clipped to [MIN_SHARES, ...) do so now. See | 
 | 2440 | 		 * calc_cfs_shares(). | 
 | 2441 | 		 */ | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 2442 | 		if (wl < MIN_SHARES) | 
 | 2443 | 			wl = MIN_SHARES; | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2444 |  | 
 | 2445 | 		/* | 
 | 2446 | 		 * wl = dw_i = S * (s'_i - s_i); see (3) | 
 | 2447 | 		 */ | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 2448 | 		wl -= se->load.weight; | 
| Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2449 |  | 
 | 2450 | 		/* | 
 | 2451 | 		 * Recursively apply this logic to all parent groups to compute | 
 | 2452 | 		 * the final effective load change on the root group. Since | 
 | 2453 | 		 * only the @tg group gets extra weight, all parent groups can | 
 | 2454 | 		 * only redistribute existing shares. @wl is the shift in shares | 
 | 2455 | 		 * resulting from this level per the above. | 
 | 2456 | 		 */ | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2457 | 		wg = 0; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2458 | 	} | 
 | 2459 |  | 
 | 2460 | 	return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2461 | } | 
 | 2462 | #else | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2463 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2464 | static inline unsigned long effective_load(struct task_group *tg, int cpu, | 
 | 2465 | 		unsigned long wl, unsigned long wg) | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2466 | { | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2467 | 	return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2468 | } | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2469 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2470 | #endif | 
 | 2471 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2472 | static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2473 | { | 
| Paul Turner | e37b6a7 | 2011-01-21 20:44:59 -0800 | [diff] [blame] | 2474 | 	s64 this_load, load; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2475 | 	int idx, this_cpu, prev_cpu; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2476 | 	unsigned long tl_per_task; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2477 | 	struct task_group *tg; | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2478 | 	unsigned long weight; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 2479 | 	int balanced; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2480 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2481 | 	idx	  = sd->wake_idx; | 
 | 2482 | 	this_cpu  = smp_processor_id(); | 
 | 2483 | 	prev_cpu  = task_cpu(p); | 
 | 2484 | 	load	  = source_load(prev_cpu, idx); | 
 | 2485 | 	this_load = target_load(this_cpu, idx); | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2486 |  | 
 | 2487 | 	/* | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2488 | 	 * If sync wakeup then subtract the (maximum possible) | 
 | 2489 | 	 * effect of the currently running task from the load | 
 | 2490 | 	 * of the current CPU: | 
 | 2491 | 	 */ | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2492 | 	if (sync) { | 
 | 2493 | 		tg = task_group(current); | 
 | 2494 | 		weight = current->se.load.weight; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2495 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2496 | 		this_load += effective_load(tg, this_cpu, -weight, -weight); | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2497 | 		load += effective_load(tg, prev_cpu, 0, -weight); | 
 | 2498 | 	} | 
 | 2499 |  | 
 | 2500 | 	tg = task_group(p); | 
 | 2501 | 	weight = p->se.load.weight; | 
 | 2502 |  | 
| Peter Zijlstra | 71a29aa | 2009-09-07 18:28:05 +0200 | [diff] [blame] | 2503 | 	/* | 
 | 2504 | 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2505 | 	 * due to the sync cause above having dropped this_load to 0, we'll | 
 | 2506 | 	 * always have an imbalance, but there's really nothing you can do | 
 | 2507 | 	 * about that, so that's good too. | 
| Peter Zijlstra | 71a29aa | 2009-09-07 18:28:05 +0200 | [diff] [blame] | 2508 | 	 * | 
 | 2509 | 	 * Otherwise check if either cpus are near enough in load to allow this | 
 | 2510 | 	 * task to be woken on this_cpu. | 
 | 2511 | 	 */ | 
| Paul Turner | e37b6a7 | 2011-01-21 20:44:59 -0800 | [diff] [blame] | 2512 | 	if (this_load > 0) { | 
 | 2513 | 		s64 this_eff_load, prev_eff_load; | 
| Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 2514 |  | 
 | 2515 | 		this_eff_load = 100; | 
 | 2516 | 		this_eff_load *= power_of(prev_cpu); | 
 | 2517 | 		this_eff_load *= this_load + | 
 | 2518 | 			effective_load(tg, this_cpu, weight, weight); | 
 | 2519 |  | 
 | 2520 | 		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; | 
 | 2521 | 		prev_eff_load *= power_of(this_cpu); | 
 | 2522 | 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); | 
 | 2523 |  | 
 | 2524 | 		balanced = this_eff_load <= prev_eff_load; | 
 | 2525 | 	} else | 
 | 2526 | 		balanced = true; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 2527 |  | 
 | 2528 | 	/* | 
 | 2529 | 	 * If the currently running task will sleep within | 
 | 2530 | 	 * a reasonable amount of time then attract this newly | 
 | 2531 | 	 * woken task: | 
 | 2532 | 	 */ | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 2533 | 	if (sync && balanced) | 
 | 2534 | 		return 1; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 2535 |  | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2536 | 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts); | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 2537 | 	tl_per_task = cpu_avg_load_per_task(this_cpu); | 
 | 2538 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2539 | 	if (balanced || | 
 | 2540 | 	    (this_load <= load && | 
 | 2541 | 	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) { | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2542 | 		/* | 
 | 2543 | 		 * This domain has SD_WAKE_AFFINE and | 
 | 2544 | 		 * p is cache cold in this domain, and | 
 | 2545 | 		 * there is no bad imbalance. | 
 | 2546 | 		 */ | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2547 | 		schedstat_inc(sd, ttwu_move_affine); | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2548 | 		schedstat_inc(p, se.statistics.nr_wakeups_affine); | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2549 |  | 
 | 2550 | 		return 1; | 
 | 2551 | 	} | 
 | 2552 | 	return 0; | 
 | 2553 | } | 
 | 2554 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2555 | /* | 
 | 2556 |  * find_idlest_group finds and returns the least busy CPU group within the | 
 | 2557 |  * domain. | 
 | 2558 |  */ | 
 | 2559 | static struct sched_group * | 
| Peter Zijlstra | 78e7ed5 | 2009-09-03 13:16:51 +0200 | [diff] [blame] | 2560 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 2561 | 		  int this_cpu, int load_idx) | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2562 | { | 
| Andi Kleen | b3bd3de | 2010-08-10 14:17:51 -0700 | [diff] [blame] | 2563 | 	struct sched_group *idlest = NULL, *group = sd->groups; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2564 | 	unsigned long min_load = ULONG_MAX, this_load = 0; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2565 | 	int imbalance = 100 + (sd->imbalance_pct-100)/2; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2566 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2567 | 	do { | 
 | 2568 | 		unsigned long load, avg_load; | 
 | 2569 | 		int local_group; | 
 | 2570 | 		int i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2571 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2572 | 		/* Skip over this group if it has no CPUs allowed */ | 
 | 2573 | 		if (!cpumask_intersects(sched_group_cpus(group), | 
| Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 2574 | 					tsk_cpus_allowed(p))) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2575 | 			continue; | 
 | 2576 |  | 
 | 2577 | 		local_group = cpumask_test_cpu(this_cpu, | 
 | 2578 | 					       sched_group_cpus(group)); | 
 | 2579 |  | 
 | 2580 | 		/* Tally up the load of all CPUs in the group */ | 
 | 2581 | 		avg_load = 0; | 
 | 2582 |  | 
 | 2583 | 		for_each_cpu(i, sched_group_cpus(group)) { | 
 | 2584 | 			/* Bias balancing toward cpus of our domain */ | 
 | 2585 | 			if (local_group) | 
 | 2586 | 				load = source_load(i, load_idx); | 
 | 2587 | 			else | 
 | 2588 | 				load = target_load(i, load_idx); | 
 | 2589 |  | 
 | 2590 | 			avg_load += load; | 
 | 2591 | 		} | 
 | 2592 |  | 
 | 2593 | 		/* Adjust by relative CPU power of the group */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2594 | 		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2595 |  | 
 | 2596 | 		if (local_group) { | 
 | 2597 | 			this_load = avg_load; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2598 | 		} else if (avg_load < min_load) { | 
 | 2599 | 			min_load = avg_load; | 
 | 2600 | 			idlest = group; | 
 | 2601 | 		} | 
 | 2602 | 	} while (group = group->next, group != sd->groups); | 
 | 2603 |  | 
 | 2604 | 	if (!idlest || 100*this_load < imbalance*min_load) | 
 | 2605 | 		return NULL; | 
 | 2606 | 	return idlest; | 
 | 2607 | } | 
 | 2608 |  | 
 | 2609 | /* | 
 | 2610 |  * find_idlest_cpu - find the idlest cpu among the cpus in group. | 
 | 2611 |  */ | 
 | 2612 | static int | 
 | 2613 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | 
 | 2614 | { | 
 | 2615 | 	unsigned long load, min_load = ULONG_MAX; | 
 | 2616 | 	int idlest = -1; | 
 | 2617 | 	int i; | 
 | 2618 |  | 
 | 2619 | 	/* Traverse only the allowed CPUs */ | 
| Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 2620 | 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) { | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2621 | 		load = weighted_cpuload(i); | 
 | 2622 |  | 
 | 2623 | 		if (load < min_load || (load == min_load && i == this_cpu)) { | 
 | 2624 | 			min_load = load; | 
 | 2625 | 			idlest = i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2626 | 		} | 
 | 2627 | 	} | 
 | 2628 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2629 | 	return idlest; | 
 | 2630 | } | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2631 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2632 | /* | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2633 |  * Try and locate an idle CPU in the sched_domain. | 
 | 2634 |  */ | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2635 | static int select_idle_sibling(struct task_struct *p, int target) | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2636 | { | 
 | 2637 | 	int cpu = smp_processor_id(); | 
 | 2638 | 	int prev_cpu = task_cpu(p); | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2639 | 	struct sched_domain *sd; | 
| Peter Zijlstra | 4dcfe102 | 2011-11-10 13:01:10 +0100 | [diff] [blame] | 2640 | 	struct sched_group *sg; | 
| Suresh Siddha | 77e8136 | 2011-11-17 11:08:23 -0800 | [diff] [blame] | 2641 | 	int i; | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2642 |  | 
 | 2643 | 	/* | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2644 | 	 * If the task is going to be woken-up on this cpu and if it is | 
 | 2645 | 	 * already idle, then it is the right target. | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2646 | 	 */ | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2647 | 	if (target == cpu && idle_cpu(cpu)) | 
 | 2648 | 		return cpu; | 
 | 2649 |  | 
 | 2650 | 	/* | 
 | 2651 | 	 * If the task is going to be woken-up on the cpu where it previously | 
 | 2652 | 	 * ran and if it is currently idle, then it the right target. | 
 | 2653 | 	 */ | 
 | 2654 | 	if (target == prev_cpu && idle_cpu(prev_cpu)) | 
| Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 2655 | 		return prev_cpu; | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2656 |  | 
 | 2657 | 	/* | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2658 | 	 * Otherwise, iterate the domains and find an elegible idle cpu. | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2659 | 	 */ | 
| Peter Zijlstra | 518cd62 | 2011-12-07 15:07:31 +0100 | [diff] [blame] | 2660 | 	sd = rcu_dereference(per_cpu(sd_llc, target)); | 
| Suresh Siddha | 77e8136 | 2011-11-17 11:08:23 -0800 | [diff] [blame] | 2661 | 	for_each_lower_domain(sd) { | 
| Peter Zijlstra | 4dcfe102 | 2011-11-10 13:01:10 +0100 | [diff] [blame] | 2662 | 		sg = sd->groups; | 
 | 2663 | 		do { | 
 | 2664 | 			if (!cpumask_intersects(sched_group_cpus(sg), | 
 | 2665 | 						tsk_cpus_allowed(p))) | 
 | 2666 | 				goto next; | 
 | 2667 |  | 
 | 2668 | 			for_each_cpu(i, sched_group_cpus(sg)) { | 
 | 2669 | 				if (!idle_cpu(i)) | 
 | 2670 | 					goto next; | 
 | 2671 | 			} | 
 | 2672 |  | 
 | 2673 | 			target = cpumask_first_and(sched_group_cpus(sg), | 
 | 2674 | 					tsk_cpus_allowed(p)); | 
 | 2675 | 			goto done; | 
 | 2676 | next: | 
 | 2677 | 			sg = sg->next; | 
 | 2678 | 		} while (sg != sd->groups); | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2679 | 	} | 
| Peter Zijlstra | 4dcfe102 | 2011-11-10 13:01:10 +0100 | [diff] [blame] | 2680 | done: | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2681 | 	return target; | 
 | 2682 | } | 
 | 2683 |  | 
 | 2684 | /* | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2685 |  * sched_balance_self: balance the current task (running on cpu) in domains | 
 | 2686 |  * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and | 
 | 2687 |  * SD_BALANCE_EXEC. | 
 | 2688 |  * | 
 | 2689 |  * Balance, ie. select the least loaded group. | 
 | 2690 |  * | 
 | 2691 |  * Returns the target CPU number, or the same CPU if no balancing is needed. | 
 | 2692 |  * | 
 | 2693 |  * preempt must be disabled. | 
 | 2694 |  */ | 
| Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2695 | static int | 
| Peter Zijlstra | 7608dec | 2011-04-05 17:23:46 +0200 | [diff] [blame] | 2696 | select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2697 | { | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 2698 | 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2699 | 	int cpu = smp_processor_id(); | 
 | 2700 | 	int prev_cpu = task_cpu(p); | 
 | 2701 | 	int new_cpu = cpu; | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2702 | 	int want_affine = 0; | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 2703 | 	int want_sd = 1; | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 2704 | 	int sync = wake_flags & WF_SYNC; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2705 |  | 
| Mike Galbraith | 76854c7 | 2011-11-22 15:18:24 +0100 | [diff] [blame] | 2706 | 	if (p->rt.nr_cpus_allowed == 1) | 
 | 2707 | 		return prev_cpu; | 
 | 2708 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 2709 | 	if (sd_flag & SD_BALANCE_WAKE) { | 
| Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 2710 | 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2711 | 			want_affine = 1; | 
 | 2712 | 		new_cpu = prev_cpu; | 
 | 2713 | 	} | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2714 |  | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 2715 | 	rcu_read_lock(); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2716 | 	for_each_domain(cpu, tmp) { | 
| Peter Zijlstra | e4f4288 | 2009-12-16 18:04:34 +0100 | [diff] [blame] | 2717 | 		if (!(tmp->flags & SD_LOAD_BALANCE)) | 
 | 2718 | 			continue; | 
 | 2719 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2720 | 		/* | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 2721 | 		 * If power savings logic is enabled for a domain, see if we | 
 | 2722 | 		 * are not overloaded, if so, don't balance wider. | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2723 | 		 */ | 
| Peter Zijlstra | 59abf02 | 2009-09-16 08:28:30 +0200 | [diff] [blame] | 2724 | 		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) { | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 2725 | 			unsigned long power = 0; | 
 | 2726 | 			unsigned long nr_running = 0; | 
 | 2727 | 			unsigned long capacity; | 
 | 2728 | 			int i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2729 |  | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 2730 | 			for_each_cpu(i, sched_domain_span(tmp)) { | 
 | 2731 | 				power += power_of(i); | 
 | 2732 | 				nr_running += cpu_rq(i)->cfs.nr_running; | 
 | 2733 | 			} | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2734 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2735 | 			capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE); | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 2736 |  | 
| Peter Zijlstra | 59abf02 | 2009-09-16 08:28:30 +0200 | [diff] [blame] | 2737 | 			if (tmp->flags & SD_POWERSAVINGS_BALANCE) | 
 | 2738 | 				nr_running /= 2; | 
 | 2739 |  | 
 | 2740 | 			if (nr_running < capacity) | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 2741 | 				want_sd = 0; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2742 | 		} | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2743 |  | 
| Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 2744 | 		/* | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2745 | 		 * If both cpu and prev_cpu are part of this domain, | 
 | 2746 | 		 * cpu is a valid SD_WAKE_AFFINE target. | 
| Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 2747 | 		 */ | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2748 | 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | 
 | 2749 | 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | 
 | 2750 | 			affine_sd = tmp; | 
 | 2751 | 			want_affine = 0; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2752 | 		} | 
 | 2753 |  | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 2754 | 		if (!want_sd && !want_affine) | 
 | 2755 | 			break; | 
 | 2756 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 2757 | 		if (!(tmp->flags & sd_flag)) | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2758 | 			continue; | 
 | 2759 |  | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 2760 | 		if (want_sd) | 
 | 2761 | 			sd = tmp; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2762 | 	} | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2763 |  | 
| Mike Galbraith | 8b911ac | 2010-03-11 17:17:16 +0100 | [diff] [blame] | 2764 | 	if (affine_sd) { | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2765 | 		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 2766 | 			prev_cpu = cpu; | 
 | 2767 |  | 
 | 2768 | 		new_cpu = select_idle_sibling(p, prev_cpu); | 
 | 2769 | 		goto unlock; | 
| Mike Galbraith | 8b911ac | 2010-03-11 17:17:16 +0100 | [diff] [blame] | 2770 | 	} | 
| Peter Zijlstra | 3b64089 | 2009-09-16 13:44:33 +0200 | [diff] [blame] | 2771 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2772 | 	while (sd) { | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 2773 | 		int load_idx = sd->forkexec_idx; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2774 | 		struct sched_group *group; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2775 | 		int weight; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2776 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 2777 | 		if (!(sd->flags & sd_flag)) { | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2778 | 			sd = sd->child; | 
 | 2779 | 			continue; | 
 | 2780 | 		} | 
 | 2781 |  | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 2782 | 		if (sd_flag & SD_BALANCE_WAKE) | 
 | 2783 | 			load_idx = sd->wake_idx; | 
 | 2784 |  | 
 | 2785 | 		group = find_idlest_group(sd, p, cpu, load_idx); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2786 | 		if (!group) { | 
 | 2787 | 			sd = sd->child; | 
 | 2788 | 			continue; | 
 | 2789 | 		} | 
 | 2790 |  | 
| Peter Zijlstra | d7c33c4 | 2009-09-11 12:45:38 +0200 | [diff] [blame] | 2791 | 		new_cpu = find_idlest_cpu(group, p, cpu); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2792 | 		if (new_cpu == -1 || new_cpu == cpu) { | 
 | 2793 | 			/* Now try balancing at a lower domain level of cpu */ | 
 | 2794 | 			sd = sd->child; | 
 | 2795 | 			continue; | 
 | 2796 | 		} | 
 | 2797 |  | 
 | 2798 | 		/* Now try balancing at a lower domain level of new_cpu */ | 
 | 2799 | 		cpu = new_cpu; | 
| Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 2800 | 		weight = sd->span_weight; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2801 | 		sd = NULL; | 
 | 2802 | 		for_each_domain(cpu, tmp) { | 
| Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 2803 | 			if (weight <= tmp->span_weight) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2804 | 				break; | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 2805 | 			if (tmp->flags & sd_flag) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2806 | 				sd = tmp; | 
 | 2807 | 		} | 
 | 2808 | 		/* while loop will break here if sd == NULL */ | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2809 | 	} | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 2810 | unlock: | 
 | 2811 | 	rcu_read_unlock(); | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2812 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2813 | 	return new_cpu; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2814 | } | 
 | 2815 | #endif /* CONFIG_SMP */ | 
 | 2816 |  | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 2817 | static unsigned long | 
 | 2818 | wakeup_gran(struct sched_entity *curr, struct sched_entity *se) | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 2819 | { | 
 | 2820 | 	unsigned long gran = sysctl_sched_wakeup_granularity; | 
 | 2821 |  | 
 | 2822 | 	/* | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 2823 | 	 * Since its curr running now, convert the gran from real-time | 
 | 2824 | 	 * to virtual-time in his units. | 
| Mike Galbraith | 13814d4 | 2010-03-11 17:17:04 +0100 | [diff] [blame] | 2825 | 	 * | 
 | 2826 | 	 * By using 'se' instead of 'curr' we penalize light tasks, so | 
 | 2827 | 	 * they get preempted easier. That is, if 'se' < 'curr' then | 
 | 2828 | 	 * the resulting gran will be larger, therefore penalizing the | 
 | 2829 | 	 * lighter, if otoh 'se' > 'curr' then the resulting gran will | 
 | 2830 | 	 * be smaller, again penalizing the lighter task. | 
 | 2831 | 	 * | 
 | 2832 | 	 * This is especially important for buddies when the leftmost | 
 | 2833 | 	 * task is higher priority than the buddy. | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 2834 | 	 */ | 
| Shaohua Li | f4ad9bd | 2011-04-08 12:53:09 +0800 | [diff] [blame] | 2835 | 	return calc_delta_fair(gran, se); | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 2836 | } | 
 | 2837 |  | 
 | 2838 | /* | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 2839 |  * Should 'se' preempt 'curr'. | 
 | 2840 |  * | 
 | 2841 |  *             |s1 | 
 | 2842 |  *        |s2 | 
 | 2843 |  *   |s3 | 
 | 2844 |  *         g | 
 | 2845 |  *      |<--->|c | 
 | 2846 |  * | 
 | 2847 |  *  w(c, s1) = -1 | 
 | 2848 |  *  w(c, s2) =  0 | 
 | 2849 |  *  w(c, s3) =  1 | 
 | 2850 |  * | 
 | 2851 |  */ | 
 | 2852 | static int | 
 | 2853 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | 
 | 2854 | { | 
 | 2855 | 	s64 gran, vdiff = curr->vruntime - se->vruntime; | 
 | 2856 |  | 
 | 2857 | 	if (vdiff <= 0) | 
 | 2858 | 		return -1; | 
 | 2859 |  | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 2860 | 	gran = wakeup_gran(curr, se); | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 2861 | 	if (vdiff > gran) | 
 | 2862 | 		return 1; | 
 | 2863 |  | 
 | 2864 | 	return 0; | 
 | 2865 | } | 
 | 2866 |  | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 2867 | static void set_last_buddy(struct sched_entity *se) | 
 | 2868 | { | 
| Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 2869 | 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) | 
 | 2870 | 		return; | 
 | 2871 |  | 
 | 2872 | 	for_each_sched_entity(se) | 
 | 2873 | 		cfs_rq_of(se)->last = se; | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 2874 | } | 
 | 2875 |  | 
 | 2876 | static void set_next_buddy(struct sched_entity *se) | 
 | 2877 | { | 
| Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 2878 | 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) | 
 | 2879 | 		return; | 
 | 2880 |  | 
 | 2881 | 	for_each_sched_entity(se) | 
 | 2882 | 		cfs_rq_of(se)->next = se; | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 2883 | } | 
 | 2884 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 2885 | static void set_skip_buddy(struct sched_entity *se) | 
 | 2886 | { | 
| Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 2887 | 	for_each_sched_entity(se) | 
 | 2888 | 		cfs_rq_of(se)->skip = se; | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 2889 | } | 
 | 2890 |  | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 2891 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2892 |  * Preempt the current task with a newly woken task if needed: | 
 | 2893 |  */ | 
| Peter Zijlstra | 5a9b86f | 2009-09-16 13:47:58 +0200 | [diff] [blame] | 2894 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2895 | { | 
 | 2896 | 	struct task_struct *curr = rq->curr; | 
| Srivatsa Vaddagiri | 8651a86 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 2897 | 	struct sched_entity *se = &curr->se, *pse = &p->se; | 
| Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 2898 | 	struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 2899 | 	int scale = cfs_rq->nr_running >= sched_nr_latency; | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2900 | 	int next_buddy_marked = 0; | 
| Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 2901 |  | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 2902 | 	if (unlikely(se == pse)) | 
 | 2903 | 		return; | 
 | 2904 |  | 
| Paul Turner | 5238cdd | 2011-07-21 09:43:37 -0700 | [diff] [blame] | 2905 | 	/* | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 2906 | 	 * This is possible from callers such as move_task(), in which we | 
| Paul Turner | 5238cdd | 2011-07-21 09:43:37 -0700 | [diff] [blame] | 2907 | 	 * unconditionally check_prempt_curr() after an enqueue (which may have | 
 | 2908 | 	 * lead to a throttle).  This both saves work and prevents false | 
 | 2909 | 	 * next-buddy nomination below. | 
 | 2910 | 	 */ | 
 | 2911 | 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) | 
 | 2912 | 		return; | 
 | 2913 |  | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2914 | 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { | 
| Mike Galbraith | 3cb63d5 | 2009-09-11 12:01:17 +0200 | [diff] [blame] | 2915 | 		set_next_buddy(pse); | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2916 | 		next_buddy_marked = 1; | 
 | 2917 | 	} | 
| Peter Zijlstra | 57fdc26 | 2008-09-23 15:33:45 +0200 | [diff] [blame] | 2918 |  | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 2919 | 	/* | 
 | 2920 | 	 * We can come here with TIF_NEED_RESCHED already set from new task | 
 | 2921 | 	 * wake up path. | 
| Paul Turner | 5238cdd | 2011-07-21 09:43:37 -0700 | [diff] [blame] | 2922 | 	 * | 
 | 2923 | 	 * Note: this also catches the edge-case of curr being in a throttled | 
 | 2924 | 	 * group (e.g. via set_curr_task), since update_curr() (in the | 
 | 2925 | 	 * enqueue of curr) will have resulted in resched being set.  This | 
 | 2926 | 	 * prevents us from potentially nominating it as a false LAST_BUDDY | 
 | 2927 | 	 * below. | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 2928 | 	 */ | 
 | 2929 | 	if (test_tsk_need_resched(curr)) | 
 | 2930 | 		return; | 
 | 2931 |  | 
| Darren Hart | a2f5c9a | 2011-02-22 13:04:33 -0800 | [diff] [blame] | 2932 | 	/* Idle tasks are by definition preempted by non-idle tasks. */ | 
 | 2933 | 	if (unlikely(curr->policy == SCHED_IDLE) && | 
 | 2934 | 	    likely(p->policy != SCHED_IDLE)) | 
 | 2935 | 		goto preempt; | 
 | 2936 |  | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2937 | 	/* | 
| Darren Hart | a2f5c9a | 2011-02-22 13:04:33 -0800 | [diff] [blame] | 2938 | 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption | 
 | 2939 | 	 * is driven by the tick): | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2940 | 	 */ | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 2941 | 	if (unlikely(p->policy != SCHED_NORMAL)) | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2942 | 		return; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2943 |  | 
| Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 2944 | 	find_matching_se(&se, &pse); | 
| Paul Turner | 9bbd737 | 2011-07-05 19:07:21 -0700 | [diff] [blame] | 2945 | 	update_curr(cfs_rq_of(se)); | 
| Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 2946 | 	BUG_ON(!pse); | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2947 | 	if (wakeup_preempt_entity(se, pse) == 1) { | 
 | 2948 | 		/* | 
 | 2949 | 		 * Bias pick_next to pick the sched entity that is | 
 | 2950 | 		 * triggering this preemption. | 
 | 2951 | 		 */ | 
 | 2952 | 		if (!next_buddy_marked) | 
 | 2953 | 			set_next_buddy(pse); | 
| Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 2954 | 		goto preempt; | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2955 | 	} | 
| Jupyung Lee | a65ac74 | 2009-11-17 18:51:40 +0900 | [diff] [blame] | 2956 |  | 
| Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 2957 | 	return; | 
 | 2958 |  | 
 | 2959 | preempt: | 
 | 2960 | 	resched_task(curr); | 
 | 2961 | 	/* | 
 | 2962 | 	 * Only set the backward buddy when the current task is still | 
 | 2963 | 	 * on the rq. This can happen when a wakeup gets interleaved | 
 | 2964 | 	 * with schedule on the ->pre_schedule() or idle_balance() | 
 | 2965 | 	 * point, either of which can * drop the rq lock. | 
 | 2966 | 	 * | 
 | 2967 | 	 * Also, during early boot the idle thread is in the fair class, | 
 | 2968 | 	 * for obvious reasons its a bad idea to schedule back to it. | 
 | 2969 | 	 */ | 
 | 2970 | 	if (unlikely(!se->on_rq || curr == rq->idle)) | 
 | 2971 | 		return; | 
 | 2972 |  | 
 | 2973 | 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) | 
 | 2974 | 		set_last_buddy(se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2975 | } | 
 | 2976 |  | 
| Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 2977 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2978 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2979 | 	struct task_struct *p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2980 | 	struct cfs_rq *cfs_rq = &rq->cfs; | 
 | 2981 | 	struct sched_entity *se; | 
 | 2982 |  | 
| Tim Blechmann | 36ace27 | 2009-11-24 11:55:45 +0100 | [diff] [blame] | 2983 | 	if (!cfs_rq->nr_running) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2984 | 		return NULL; | 
 | 2985 |  | 
 | 2986 | 	do { | 
| Ingo Molnar | 9948f4b | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 2987 | 		se = pick_next_entity(cfs_rq); | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 2988 | 		set_next_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2989 | 		cfs_rq = group_cfs_rq(se); | 
 | 2990 | 	} while (cfs_rq); | 
 | 2991 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2992 | 	p = task_of(se); | 
| Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 2993 | 	if (hrtick_enabled(rq)) | 
 | 2994 | 		hrtick_start_fair(rq, p); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2995 |  | 
 | 2996 | 	return p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2997 | } | 
 | 2998 |  | 
 | 2999 | /* | 
 | 3000 |  * Account for a descheduled task: | 
 | 3001 |  */ | 
| Ingo Molnar | 31ee529 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 3002 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3003 | { | 
 | 3004 | 	struct sched_entity *se = &prev->se; | 
 | 3005 | 	struct cfs_rq *cfs_rq; | 
 | 3006 |  | 
 | 3007 | 	for_each_sched_entity(se) { | 
 | 3008 | 		cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 3009 | 		put_prev_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3010 | 	} | 
 | 3011 | } | 
 | 3012 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 3013 | /* | 
 | 3014 |  * sched_yield() is very simple | 
 | 3015 |  * | 
 | 3016 |  * The magic of dealing with the ->skip buddy is in pick_next_entity. | 
 | 3017 |  */ | 
 | 3018 | static void yield_task_fair(struct rq *rq) | 
 | 3019 | { | 
 | 3020 | 	struct task_struct *curr = rq->curr; | 
 | 3021 | 	struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
 | 3022 | 	struct sched_entity *se = &curr->se; | 
 | 3023 |  | 
 | 3024 | 	/* | 
 | 3025 | 	 * Are we the only task in the tree? | 
 | 3026 | 	 */ | 
 | 3027 | 	if (unlikely(rq->nr_running == 1)) | 
 | 3028 | 		return; | 
 | 3029 |  | 
 | 3030 | 	clear_buddies(cfs_rq, se); | 
 | 3031 |  | 
 | 3032 | 	if (curr->policy != SCHED_BATCH) { | 
 | 3033 | 		update_rq_clock(rq); | 
 | 3034 | 		/* | 
 | 3035 | 		 * Update run-time statistics of the 'current'. | 
 | 3036 | 		 */ | 
 | 3037 | 		update_curr(cfs_rq); | 
| Mike Galbraith | 916671c | 2011-11-22 15:21:26 +0100 | [diff] [blame] | 3038 | 		/* | 
 | 3039 | 		 * Tell update_rq_clock() that we've just updated, | 
 | 3040 | 		 * so we don't do microscopic update in schedule() | 
 | 3041 | 		 * and double the fastpath cost. | 
 | 3042 | 		 */ | 
 | 3043 | 		 rq->skip_clock_update = 1; | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 3044 | 	} | 
 | 3045 |  | 
 | 3046 | 	set_skip_buddy(se); | 
 | 3047 | } | 
 | 3048 |  | 
| Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 3049 | static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) | 
 | 3050 | { | 
 | 3051 | 	struct sched_entity *se = &p->se; | 
 | 3052 |  | 
| Paul Turner | 5238cdd | 2011-07-21 09:43:37 -0700 | [diff] [blame] | 3053 | 	/* throttled hierarchies are not runnable */ | 
 | 3054 | 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) | 
| Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 3055 | 		return false; | 
 | 3056 |  | 
 | 3057 | 	/* Tell the scheduler that we'd really like pse to run next. */ | 
 | 3058 | 	set_next_buddy(se); | 
 | 3059 |  | 
| Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 3060 | 	yield_task_fair(rq); | 
 | 3061 |  | 
 | 3062 | 	return true; | 
 | 3063 | } | 
 | 3064 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 3065 | #ifdef CONFIG_SMP | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3066 | /************************************************** | 
 | 3067 |  * Fair scheduling class load-balancing methods: | 
 | 3068 |  */ | 
 | 3069 |  | 
| Hiroshi Shimamoto | ed387b7 | 2012-01-31 11:40:32 +0900 | [diff] [blame] | 3070 | static unsigned long __read_mostly max_load_balance_interval = HZ/10; | 
 | 3071 |  | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3072 | #define LBF_ALL_PINNED	0x01 | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3073 | #define LBF_NEED_BREAK	0x02 | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3074 |  | 
 | 3075 | struct lb_env { | 
 | 3076 | 	struct sched_domain	*sd; | 
 | 3077 |  | 
 | 3078 | 	int			src_cpu; | 
 | 3079 | 	struct rq		*src_rq; | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3080 |  | 
 | 3081 | 	int			dst_cpu; | 
 | 3082 | 	struct rq		*dst_rq; | 
 | 3083 |  | 
 | 3084 | 	enum cpu_idle_type	idle; | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3085 | 	long			load_move; | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3086 | 	unsigned int		flags; | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3087 |  | 
 | 3088 | 	unsigned int		loop; | 
 | 3089 | 	unsigned int		loop_break; | 
 | 3090 | 	unsigned int		loop_max; | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3091 | }; | 
 | 3092 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3093 | /* | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3094 |  * move_task - move a task from one runqueue to another runqueue. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3095 |  * Both runqueues must be locked. | 
 | 3096 |  */ | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3097 | static void move_task(struct task_struct *p, struct lb_env *env) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3098 | { | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3099 | 	deactivate_task(env->src_rq, p, 0); | 
 | 3100 | 	set_task_cpu(p, env->dst_cpu); | 
 | 3101 | 	activate_task(env->dst_rq, p, 0); | 
 | 3102 | 	check_preempt_curr(env->dst_rq, p, 0); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3103 | } | 
 | 3104 |  | 
 | 3105 | /* | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 3106 |  * Is this task likely cache-hot: | 
 | 3107 |  */ | 
 | 3108 | static int | 
 | 3109 | task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | 
 | 3110 | { | 
 | 3111 | 	s64 delta; | 
 | 3112 |  | 
 | 3113 | 	if (p->sched_class != &fair_sched_class) | 
 | 3114 | 		return 0; | 
 | 3115 |  | 
 | 3116 | 	if (unlikely(p->policy == SCHED_IDLE)) | 
 | 3117 | 		return 0; | 
 | 3118 |  | 
 | 3119 | 	/* | 
 | 3120 | 	 * Buddy candidates are cache hot: | 
 | 3121 | 	 */ | 
 | 3122 | 	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && | 
 | 3123 | 			(&p->se == cfs_rq_of(&p->se)->next || | 
 | 3124 | 			 &p->se == cfs_rq_of(&p->se)->last)) | 
 | 3125 | 		return 1; | 
 | 3126 |  | 
 | 3127 | 	if (sysctl_sched_migration_cost == -1) | 
 | 3128 | 		return 1; | 
 | 3129 | 	if (sysctl_sched_migration_cost == 0) | 
 | 3130 | 		return 0; | 
 | 3131 |  | 
 | 3132 | 	delta = now - p->se.exec_start; | 
 | 3133 |  | 
 | 3134 | 	return delta < (s64)sysctl_sched_migration_cost; | 
 | 3135 | } | 
 | 3136 |  | 
 | 3137 | /* | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3138 |  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? | 
 | 3139 |  */ | 
 | 3140 | static | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3141 | int can_migrate_task(struct task_struct *p, struct lb_env *env) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3142 | { | 
 | 3143 | 	int tsk_cache_hot = 0; | 
 | 3144 | 	/* | 
 | 3145 | 	 * We do not migrate tasks that are: | 
 | 3146 | 	 * 1) running (obviously), or | 
 | 3147 | 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or | 
 | 3148 | 	 * 3) are cache-hot on their current CPU. | 
 | 3149 | 	 */ | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3150 | 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 3151 | 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3152 | 		return 0; | 
 | 3153 | 	} | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3154 | 	env->flags &= ~LBF_ALL_PINNED; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3155 |  | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3156 | 	if (task_running(env->src_rq, p)) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 3157 | 		schedstat_inc(p, se.statistics.nr_failed_migrations_running); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3158 | 		return 0; | 
 | 3159 | 	} | 
 | 3160 |  | 
 | 3161 | 	/* | 
 | 3162 | 	 * Aggressive migration if: | 
 | 3163 | 	 * 1) task is cache cold, or | 
 | 3164 | 	 * 2) too many balance attempts have failed. | 
 | 3165 | 	 */ | 
 | 3166 |  | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3167 | 	tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3168 | 	if (!tsk_cache_hot || | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3169 | 		env->sd->nr_balance_failed > env->sd->cache_nice_tries) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3170 | #ifdef CONFIG_SCHEDSTATS | 
 | 3171 | 		if (tsk_cache_hot) { | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3172 | 			schedstat_inc(env->sd, lb_hot_gained[env->idle]); | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 3173 | 			schedstat_inc(p, se.statistics.nr_forced_migrations); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3174 | 		} | 
 | 3175 | #endif | 
 | 3176 | 		return 1; | 
 | 3177 | 	} | 
 | 3178 |  | 
 | 3179 | 	if (tsk_cache_hot) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 3180 | 		schedstat_inc(p, se.statistics.nr_failed_migrations_hot); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3181 | 		return 0; | 
 | 3182 | 	} | 
 | 3183 | 	return 1; | 
 | 3184 | } | 
 | 3185 |  | 
| Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3186 | /* | 
 | 3187 |  * move_one_task tries to move exactly one task from busiest to this_rq, as | 
 | 3188 |  * part of active balancing operations within "domain". | 
 | 3189 |  * Returns 1 if successful and 0 otherwise. | 
 | 3190 |  * | 
 | 3191 |  * Called with both runqueues locked. | 
 | 3192 |  */ | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3193 | static int move_one_task(struct lb_env *env) | 
| Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3194 | { | 
 | 3195 | 	struct task_struct *p, *n; | 
| Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3196 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3197 | 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { | 
 | 3198 | 		if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu)) | 
 | 3199 | 			continue; | 
| Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3200 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3201 | 		if (!can_migrate_task(p, env)) | 
 | 3202 | 			continue; | 
| Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3203 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3204 | 		move_task(p, env); | 
 | 3205 | 		/* | 
 | 3206 | 		 * Right now, this is only the second place move_task() | 
 | 3207 | 		 * is called, so we can safely collect move_task() | 
 | 3208 | 		 * stats here rather than inside move_task(). | 
 | 3209 | 		 */ | 
 | 3210 | 		schedstat_inc(env->sd, lb_gained[env->idle]); | 
 | 3211 | 		return 1; | 
| Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3212 | 	} | 
| Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3213 | 	return 0; | 
 | 3214 | } | 
 | 3215 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3216 | static unsigned long task_h_load(struct task_struct *p); | 
 | 3217 |  | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3218 | /* | 
 | 3219 |  * move_tasks tries to move up to load_move weighted load from busiest to | 
 | 3220 |  * this_rq, as part of a balancing operation within domain "sd". | 
 | 3221 |  * Returns 1 if successful and 0 otherwise. | 
 | 3222 |  * | 
 | 3223 |  * Called with both runqueues locked. | 
 | 3224 |  */ | 
 | 3225 | static int move_tasks(struct lb_env *env) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3226 | { | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3227 | 	struct list_head *tasks = &env->src_rq->cfs_tasks; | 
 | 3228 | 	struct task_struct *p; | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3229 | 	unsigned long load; | 
 | 3230 | 	int pulled = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3231 |  | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3232 | 	if (env->load_move <= 0) | 
 | 3233 | 		return 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3234 |  | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3235 | 	while (!list_empty(tasks)) { | 
 | 3236 | 		p = list_first_entry(tasks, struct task_struct, se.group_node); | 
 | 3237 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3238 | 		env->loop++; | 
 | 3239 | 		/* We've more or less seen every task there is, call it quits */ | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3240 | 		if (env->loop > env->loop_max) | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3241 | 			break; | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3242 |  | 
 | 3243 | 		/* take a breather every nr_migrate tasks */ | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3244 | 		if (env->loop > env->loop_break) { | 
 | 3245 | 			env->loop_break += sysctl_sched_nr_migrate; | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3246 | 			env->flags |= LBF_NEED_BREAK; | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3247 | 			break; | 
| Peter Zijlstra | a195f00 | 2011-09-22 15:30:18 +0200 | [diff] [blame] | 3248 | 		} | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3249 |  | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3250 | 		if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3251 | 			goto next; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3252 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3253 | 		load = task_h_load(p); | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3254 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3255 | 		if (load < 16 && !env->sd->nr_balance_failed) | 
 | 3256 | 			goto next; | 
 | 3257 |  | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3258 | 		if ((load / 2) > env->load_move) | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3259 | 			goto next; | 
 | 3260 |  | 
 | 3261 | 		if (!can_migrate_task(p, env)) | 
 | 3262 | 			goto next; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3263 |  | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3264 | 		move_task(p, env); | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3265 | 		pulled++; | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3266 | 		env->load_move -= load; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3267 |  | 
 | 3268 | #ifdef CONFIG_PREEMPT | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3269 | 		/* | 
 | 3270 | 		 * NEWIDLE balancing is a source of latency, so preemptible | 
 | 3271 | 		 * kernels will stop after the first task is pulled to minimize | 
 | 3272 | 		 * the critical section. | 
 | 3273 | 		 */ | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3274 | 		if (env->idle == CPU_NEWLY_IDLE) | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3275 | 			break; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3276 | #endif | 
 | 3277 |  | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3278 | 		/* | 
 | 3279 | 		 * We only want to steal up to the prescribed amount of | 
 | 3280 | 		 * weighted load. | 
 | 3281 | 		 */ | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3282 | 		if (env->load_move <= 0) | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3283 | 			break; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3284 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3285 | 		continue; | 
 | 3286 | next: | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3287 | 		list_move_tail(&p->se.group_node, tasks); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3288 | 	} | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3289 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3290 | 	/* | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3291 | 	 * Right now, this is one of only two places move_task() is called, | 
 | 3292 | 	 * so we can safely collect move_task() stats here rather than | 
 | 3293 | 	 * inside move_task(). | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3294 | 	 */ | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3295 | 	schedstat_add(env->sd, lb_gained[env->idle], pulled); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3296 |  | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3297 | 	return pulled; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3298 | } | 
 | 3299 |  | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3300 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3301 | /* | 
 | 3302 |  * update tg->load_weight by folding this cpu's load_avg | 
 | 3303 |  */ | 
| Paul Turner | 67e8625 | 2010-11-15 15:47:05 -0800 | [diff] [blame] | 3304 | static int update_shares_cpu(struct task_group *tg, int cpu) | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3305 | { | 
 | 3306 | 	struct cfs_rq *cfs_rq; | 
 | 3307 | 	unsigned long flags; | 
 | 3308 | 	struct rq *rq; | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3309 |  | 
 | 3310 | 	if (!tg->se[cpu]) | 
 | 3311 | 		return 0; | 
 | 3312 |  | 
 | 3313 | 	rq = cpu_rq(cpu); | 
 | 3314 | 	cfs_rq = tg->cfs_rq[cpu]; | 
 | 3315 |  | 
 | 3316 | 	raw_spin_lock_irqsave(&rq->lock, flags); | 
 | 3317 |  | 
 | 3318 | 	update_rq_clock(rq); | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 3319 | 	update_cfs_load(cfs_rq, 1); | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3320 |  | 
 | 3321 | 	/* | 
 | 3322 | 	 * We need to update shares after updating tg->load_weight in | 
 | 3323 | 	 * order to adjust the weight of groups with long running tasks. | 
 | 3324 | 	 */ | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 3325 | 	update_cfs_shares(cfs_rq); | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3326 |  | 
 | 3327 | 	raw_spin_unlock_irqrestore(&rq->lock, flags); | 
 | 3328 |  | 
 | 3329 | 	return 0; | 
 | 3330 | } | 
 | 3331 |  | 
 | 3332 | static void update_shares(int cpu) | 
 | 3333 | { | 
 | 3334 | 	struct cfs_rq *cfs_rq; | 
 | 3335 | 	struct rq *rq = cpu_rq(cpu); | 
 | 3336 |  | 
 | 3337 | 	rcu_read_lock(); | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 3338 | 	/* | 
 | 3339 | 	 * Iterates the task_group tree in a bottom up fashion, see | 
 | 3340 | 	 * list_add_leaf_cfs_rq() for details. | 
 | 3341 | 	 */ | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 3342 | 	for_each_leaf_cfs_rq(rq, cfs_rq) { | 
 | 3343 | 		/* throttled entities do not contribute to load */ | 
 | 3344 | 		if (throttled_hierarchy(cfs_rq)) | 
 | 3345 | 			continue; | 
 | 3346 |  | 
| Paul Turner | 67e8625 | 2010-11-15 15:47:05 -0800 | [diff] [blame] | 3347 | 		update_shares_cpu(cfs_rq->tg, cpu); | 
| Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 3348 | 	} | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3349 | 	rcu_read_unlock(); | 
 | 3350 | } | 
 | 3351 |  | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 3352 | /* | 
 | 3353 |  * Compute the cpu's hierarchical load factor for each task group. | 
 | 3354 |  * This needs to be done in a top-down fashion because the load of a child | 
 | 3355 |  * group is a fraction of its parents load. | 
 | 3356 |  */ | 
 | 3357 | static int tg_load_down(struct task_group *tg, void *data) | 
 | 3358 | { | 
 | 3359 | 	unsigned long load; | 
 | 3360 | 	long cpu = (long)data; | 
 | 3361 |  | 
 | 3362 | 	if (!tg->parent) { | 
 | 3363 | 		load = cpu_rq(cpu)->load.weight; | 
 | 3364 | 	} else { | 
 | 3365 | 		load = tg->parent->cfs_rq[cpu]->h_load; | 
 | 3366 | 		load *= tg->se[cpu]->load.weight; | 
 | 3367 | 		load /= tg->parent->cfs_rq[cpu]->load.weight + 1; | 
 | 3368 | 	} | 
 | 3369 |  | 
 | 3370 | 	tg->cfs_rq[cpu]->h_load = load; | 
 | 3371 |  | 
 | 3372 | 	return 0; | 
 | 3373 | } | 
 | 3374 |  | 
 | 3375 | static void update_h_load(long cpu) | 
 | 3376 | { | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3377 | 	rcu_read_lock(); | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 3378 | 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3379 | 	rcu_read_unlock(); | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 3380 | } | 
 | 3381 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3382 | static unsigned long task_h_load(struct task_struct *p) | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3383 | { | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3384 | 	struct cfs_rq *cfs_rq = task_cfs_rq(p); | 
 | 3385 | 	unsigned long load; | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3386 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3387 | 	load = p->se.load.weight; | 
 | 3388 | 	load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1); | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3389 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3390 | 	return load; | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3391 | } | 
 | 3392 | #else | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3393 | static inline void update_shares(int cpu) | 
 | 3394 | { | 
 | 3395 | } | 
 | 3396 |  | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3397 | static inline void update_h_load(long cpu) | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3398 | { | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3399 | } | 
 | 3400 |  | 
 | 3401 | static unsigned long task_h_load(struct task_struct *p) | 
 | 3402 | { | 
 | 3403 | 	return p->se.load.weight; | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3404 | } | 
 | 3405 | #endif | 
 | 3406 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3407 | /********** Helpers for find_busiest_group ************************/ | 
 | 3408 | /* | 
 | 3409 |  * sd_lb_stats - Structure to store the statistics of a sched_domain | 
 | 3410 |  * 		during load balancing. | 
 | 3411 |  */ | 
 | 3412 | struct sd_lb_stats { | 
 | 3413 | 	struct sched_group *busiest; /* Busiest group in this sd */ | 
 | 3414 | 	struct sched_group *this;  /* Local group in this sd */ | 
 | 3415 | 	unsigned long total_load;  /* Total load of all groups in sd */ | 
 | 3416 | 	unsigned long total_pwr;   /*	Total power of all groups in sd */ | 
 | 3417 | 	unsigned long avg_load;	   /* Average load across all groups in sd */ | 
 | 3418 |  | 
 | 3419 | 	/** Statistics of this group */ | 
 | 3420 | 	unsigned long this_load; | 
 | 3421 | 	unsigned long this_load_per_task; | 
 | 3422 | 	unsigned long this_nr_running; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3423 | 	unsigned long this_has_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3424 | 	unsigned int  this_idle_cpus; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3425 |  | 
 | 3426 | 	/* Statistics of the busiest group */ | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3427 | 	unsigned int  busiest_idle_cpus; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3428 | 	unsigned long max_load; | 
 | 3429 | 	unsigned long busiest_load_per_task; | 
 | 3430 | 	unsigned long busiest_nr_running; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3431 | 	unsigned long busiest_group_capacity; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3432 | 	unsigned long busiest_has_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3433 | 	unsigned int  busiest_group_weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3434 |  | 
 | 3435 | 	int group_imb; /* Is there imbalance in this sd */ | 
 | 3436 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 
 | 3437 | 	int power_savings_balance; /* Is powersave balance needed for this sd */ | 
 | 3438 | 	struct sched_group *group_min; /* Least loaded group in sd */ | 
 | 3439 | 	struct sched_group *group_leader; /* Group which relieves group_min */ | 
 | 3440 | 	unsigned long min_load_per_task; /* load_per_task in group_min */ | 
 | 3441 | 	unsigned long leader_nr_running; /* Nr running of group_leader */ | 
 | 3442 | 	unsigned long min_nr_running; /* Nr running of group_min */ | 
 | 3443 | #endif | 
 | 3444 | }; | 
 | 3445 |  | 
 | 3446 | /* | 
 | 3447 |  * sg_lb_stats - stats of a sched_group required for load_balancing | 
 | 3448 |  */ | 
 | 3449 | struct sg_lb_stats { | 
 | 3450 | 	unsigned long avg_load; /*Avg load across the CPUs of the group */ | 
 | 3451 | 	unsigned long group_load; /* Total load over the CPUs of the group */ | 
 | 3452 | 	unsigned long sum_nr_running; /* Nr tasks running in the group */ | 
 | 3453 | 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */ | 
 | 3454 | 	unsigned long group_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3455 | 	unsigned long idle_cpus; | 
 | 3456 | 	unsigned long group_weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3457 | 	int group_imb; /* Is there an imbalance in the group ? */ | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3458 | 	int group_has_capacity; /* Is there extra capacity in the group? */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3459 | }; | 
 | 3460 |  | 
 | 3461 | /** | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3462 |  * get_sd_load_idx - Obtain the load index for a given sched domain. | 
 | 3463 |  * @sd: The sched_domain whose load_idx is to be obtained. | 
 | 3464 |  * @idle: The Idle status of the CPU for whose sd load_icx is obtained. | 
 | 3465 |  */ | 
 | 3466 | static inline int get_sd_load_idx(struct sched_domain *sd, | 
 | 3467 | 					enum cpu_idle_type idle) | 
 | 3468 | { | 
 | 3469 | 	int load_idx; | 
 | 3470 |  | 
 | 3471 | 	switch (idle) { | 
 | 3472 | 	case CPU_NOT_IDLE: | 
 | 3473 | 		load_idx = sd->busy_idx; | 
 | 3474 | 		break; | 
 | 3475 |  | 
 | 3476 | 	case CPU_NEWLY_IDLE: | 
 | 3477 | 		load_idx = sd->newidle_idx; | 
 | 3478 | 		break; | 
 | 3479 | 	default: | 
 | 3480 | 		load_idx = sd->idle_idx; | 
 | 3481 | 		break; | 
 | 3482 | 	} | 
 | 3483 |  | 
 | 3484 | 	return load_idx; | 
 | 3485 | } | 
 | 3486 |  | 
 | 3487 |  | 
 | 3488 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 
 | 3489 | /** | 
 | 3490 |  * init_sd_power_savings_stats - Initialize power savings statistics for | 
 | 3491 |  * the given sched_domain, during load balancing. | 
 | 3492 |  * | 
 | 3493 |  * @sd: Sched domain whose power-savings statistics are to be initialized. | 
 | 3494 |  * @sds: Variable containing the statistics for sd. | 
 | 3495 |  * @idle: Idle status of the CPU at which we're performing load-balancing. | 
 | 3496 |  */ | 
 | 3497 | static inline void init_sd_power_savings_stats(struct sched_domain *sd, | 
 | 3498 | 	struct sd_lb_stats *sds, enum cpu_idle_type idle) | 
 | 3499 | { | 
 | 3500 | 	/* | 
 | 3501 | 	 * Busy processors will not participate in power savings | 
 | 3502 | 	 * balance. | 
 | 3503 | 	 */ | 
 | 3504 | 	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) | 
 | 3505 | 		sds->power_savings_balance = 0; | 
 | 3506 | 	else { | 
 | 3507 | 		sds->power_savings_balance = 1; | 
 | 3508 | 		sds->min_nr_running = ULONG_MAX; | 
 | 3509 | 		sds->leader_nr_running = 0; | 
 | 3510 | 	} | 
 | 3511 | } | 
 | 3512 |  | 
 | 3513 | /** | 
 | 3514 |  * update_sd_power_savings_stats - Update the power saving stats for a | 
 | 3515 |  * sched_domain while performing load balancing. | 
 | 3516 |  * | 
 | 3517 |  * @group: sched_group belonging to the sched_domain under consideration. | 
 | 3518 |  * @sds: Variable containing the statistics of the sched_domain | 
 | 3519 |  * @local_group: Does group contain the CPU for which we're performing | 
 | 3520 |  * 		load balancing ? | 
 | 3521 |  * @sgs: Variable containing the statistics of the group. | 
 | 3522 |  */ | 
 | 3523 | static inline void update_sd_power_savings_stats(struct sched_group *group, | 
 | 3524 | 	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs) | 
 | 3525 | { | 
 | 3526 |  | 
 | 3527 | 	if (!sds->power_savings_balance) | 
 | 3528 | 		return; | 
 | 3529 |  | 
 | 3530 | 	/* | 
 | 3531 | 	 * If the local group is idle or completely loaded | 
 | 3532 | 	 * no need to do power savings balance at this domain | 
 | 3533 | 	 */ | 
 | 3534 | 	if (local_group && (sds->this_nr_running >= sgs->group_capacity || | 
 | 3535 | 				!sds->this_nr_running)) | 
 | 3536 | 		sds->power_savings_balance = 0; | 
 | 3537 |  | 
 | 3538 | 	/* | 
 | 3539 | 	 * If a group is already running at full capacity or idle, | 
 | 3540 | 	 * don't include that group in power savings calculations | 
 | 3541 | 	 */ | 
 | 3542 | 	if (!sds->power_savings_balance || | 
 | 3543 | 		sgs->sum_nr_running >= sgs->group_capacity || | 
 | 3544 | 		!sgs->sum_nr_running) | 
 | 3545 | 		return; | 
 | 3546 |  | 
 | 3547 | 	/* | 
 | 3548 | 	 * Calculate the group which has the least non-idle load. | 
 | 3549 | 	 * This is the group from where we need to pick up the load | 
 | 3550 | 	 * for saving power | 
 | 3551 | 	 */ | 
 | 3552 | 	if ((sgs->sum_nr_running < sds->min_nr_running) || | 
 | 3553 | 	    (sgs->sum_nr_running == sds->min_nr_running && | 
 | 3554 | 	     group_first_cpu(group) > group_first_cpu(sds->group_min))) { | 
 | 3555 | 		sds->group_min = group; | 
 | 3556 | 		sds->min_nr_running = sgs->sum_nr_running; | 
 | 3557 | 		sds->min_load_per_task = sgs->sum_weighted_load / | 
 | 3558 | 						sgs->sum_nr_running; | 
 | 3559 | 	} | 
 | 3560 |  | 
 | 3561 | 	/* | 
 | 3562 | 	 * Calculate the group which is almost near its | 
 | 3563 | 	 * capacity but still has some space to pick up some load | 
 | 3564 | 	 * from other group and save more power | 
 | 3565 | 	 */ | 
 | 3566 | 	if (sgs->sum_nr_running + 1 > sgs->group_capacity) | 
 | 3567 | 		return; | 
 | 3568 |  | 
 | 3569 | 	if (sgs->sum_nr_running > sds->leader_nr_running || | 
 | 3570 | 	    (sgs->sum_nr_running == sds->leader_nr_running && | 
 | 3571 | 	     group_first_cpu(group) < group_first_cpu(sds->group_leader))) { | 
 | 3572 | 		sds->group_leader = group; | 
 | 3573 | 		sds->leader_nr_running = sgs->sum_nr_running; | 
 | 3574 | 	} | 
 | 3575 | } | 
 | 3576 |  | 
 | 3577 | /** | 
 | 3578 |  * check_power_save_busiest_group - see if there is potential for some power-savings balance | 
 | 3579 |  * @sds: Variable containing the statistics of the sched_domain | 
 | 3580 |  *	under consideration. | 
 | 3581 |  * @this_cpu: Cpu at which we're currently performing load-balancing. | 
 | 3582 |  * @imbalance: Variable to store the imbalance. | 
 | 3583 |  * | 
 | 3584 |  * Description: | 
 | 3585 |  * Check if we have potential to perform some power-savings balance. | 
 | 3586 |  * If yes, set the busiest group to be the least loaded group in the | 
 | 3587 |  * sched_domain, so that it's CPUs can be put to idle. | 
 | 3588 |  * | 
 | 3589 |  * Returns 1 if there is potential to perform power-savings balance. | 
 | 3590 |  * Else returns 0. | 
 | 3591 |  */ | 
 | 3592 | static inline int check_power_save_busiest_group(struct sd_lb_stats *sds, | 
 | 3593 | 					int this_cpu, unsigned long *imbalance) | 
 | 3594 | { | 
 | 3595 | 	if (!sds->power_savings_balance) | 
 | 3596 | 		return 0; | 
 | 3597 |  | 
 | 3598 | 	if (sds->this != sds->group_leader || | 
 | 3599 | 			sds->group_leader == sds->group_min) | 
 | 3600 | 		return 0; | 
 | 3601 |  | 
 | 3602 | 	*imbalance = sds->min_load_per_task; | 
 | 3603 | 	sds->busiest = sds->group_min; | 
 | 3604 |  | 
 | 3605 | 	return 1; | 
 | 3606 |  | 
 | 3607 | } | 
 | 3608 | #else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | 
 | 3609 | static inline void init_sd_power_savings_stats(struct sched_domain *sd, | 
 | 3610 | 	struct sd_lb_stats *sds, enum cpu_idle_type idle) | 
 | 3611 | { | 
 | 3612 | 	return; | 
 | 3613 | } | 
 | 3614 |  | 
 | 3615 | static inline void update_sd_power_savings_stats(struct sched_group *group, | 
 | 3616 | 	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs) | 
 | 3617 | { | 
 | 3618 | 	return; | 
 | 3619 | } | 
 | 3620 |  | 
 | 3621 | static inline int check_power_save_busiest_group(struct sd_lb_stats *sds, | 
 | 3622 | 					int this_cpu, unsigned long *imbalance) | 
 | 3623 | { | 
 | 3624 | 	return 0; | 
 | 3625 | } | 
 | 3626 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | 
 | 3627 |  | 
 | 3628 |  | 
 | 3629 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu) | 
 | 3630 | { | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3631 | 	return SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3632 | } | 
 | 3633 |  | 
 | 3634 | unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu) | 
 | 3635 | { | 
 | 3636 | 	return default_scale_freq_power(sd, cpu); | 
 | 3637 | } | 
 | 3638 |  | 
 | 3639 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) | 
 | 3640 | { | 
| Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 3641 | 	unsigned long weight = sd->span_weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3642 | 	unsigned long smt_gain = sd->smt_gain; | 
 | 3643 |  | 
 | 3644 | 	smt_gain /= weight; | 
 | 3645 |  | 
 | 3646 | 	return smt_gain; | 
 | 3647 | } | 
 | 3648 |  | 
 | 3649 | unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) | 
 | 3650 | { | 
 | 3651 | 	return default_scale_smt_power(sd, cpu); | 
 | 3652 | } | 
 | 3653 |  | 
 | 3654 | unsigned long scale_rt_power(int cpu) | 
 | 3655 | { | 
 | 3656 | 	struct rq *rq = cpu_rq(cpu); | 
 | 3657 | 	u64 total, available; | 
 | 3658 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3659 | 	total = sched_avg_period() + (rq->clock - rq->age_stamp); | 
| Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 3660 |  | 
 | 3661 | 	if (unlikely(total < rq->rt_avg)) { | 
 | 3662 | 		/* Ensures that power won't end up being negative */ | 
 | 3663 | 		available = 0; | 
 | 3664 | 	} else { | 
 | 3665 | 		available = total - rq->rt_avg; | 
 | 3666 | 	} | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3667 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3668 | 	if (unlikely((s64)total < SCHED_POWER_SCALE)) | 
 | 3669 | 		total = SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3670 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3671 | 	total >>= SCHED_POWER_SHIFT; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3672 |  | 
 | 3673 | 	return div_u64(available, total); | 
 | 3674 | } | 
 | 3675 |  | 
 | 3676 | static void update_cpu_power(struct sched_domain *sd, int cpu) | 
 | 3677 | { | 
| Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 3678 | 	unsigned long weight = sd->span_weight; | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3679 | 	unsigned long power = SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3680 | 	struct sched_group *sdg = sd->groups; | 
 | 3681 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3682 | 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { | 
 | 3683 | 		if (sched_feat(ARCH_POWER)) | 
 | 3684 | 			power *= arch_scale_smt_power(sd, cpu); | 
 | 3685 | 		else | 
 | 3686 | 			power *= default_scale_smt_power(sd, cpu); | 
 | 3687 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3688 | 		power >>= SCHED_POWER_SHIFT; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3689 | 	} | 
 | 3690 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3691 | 	sdg->sgp->power_orig = power; | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3692 |  | 
 | 3693 | 	if (sched_feat(ARCH_POWER)) | 
 | 3694 | 		power *= arch_scale_freq_power(sd, cpu); | 
 | 3695 | 	else | 
 | 3696 | 		power *= default_scale_freq_power(sd, cpu); | 
 | 3697 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3698 | 	power >>= SCHED_POWER_SHIFT; | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3699 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3700 | 	power *= scale_rt_power(cpu); | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3701 | 	power >>= SCHED_POWER_SHIFT; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3702 |  | 
 | 3703 | 	if (!power) | 
 | 3704 | 		power = 1; | 
 | 3705 |  | 
| Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 3706 | 	cpu_rq(cpu)->cpu_power = power; | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3707 | 	sdg->sgp->power = power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3708 | } | 
 | 3709 |  | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 3710 | void update_group_power(struct sched_domain *sd, int cpu) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3711 | { | 
 | 3712 | 	struct sched_domain *child = sd->child; | 
 | 3713 | 	struct sched_group *group, *sdg = sd->groups; | 
 | 3714 | 	unsigned long power; | 
| Vincent Guittot | 4ec4412 | 2011-12-12 20:21:08 +0100 | [diff] [blame] | 3715 | 	unsigned long interval; | 
 | 3716 |  | 
 | 3717 | 	interval = msecs_to_jiffies(sd->balance_interval); | 
 | 3718 | 	interval = clamp(interval, 1UL, max_load_balance_interval); | 
 | 3719 | 	sdg->sgp->next_update = jiffies + interval; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3720 |  | 
 | 3721 | 	if (!child) { | 
 | 3722 | 		update_cpu_power(sd, cpu); | 
 | 3723 | 		return; | 
 | 3724 | 	} | 
 | 3725 |  | 
 | 3726 | 	power = 0; | 
 | 3727 |  | 
 | 3728 | 	group = child->groups; | 
 | 3729 | 	do { | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3730 | 		power += group->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3731 | 		group = group->next; | 
 | 3732 | 	} while (group != child->groups); | 
 | 3733 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3734 | 	sdg->sgp->power = power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3735 | } | 
 | 3736 |  | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3737 | /* | 
 | 3738 |  * Try and fix up capacity for tiny siblings, this is needed when | 
 | 3739 |  * things like SD_ASYM_PACKING need f_b_g to select another sibling | 
 | 3740 |  * which on its own isn't powerful enough. | 
 | 3741 |  * | 
 | 3742 |  * See update_sd_pick_busiest() and check_asym_packing(). | 
 | 3743 |  */ | 
 | 3744 | static inline int | 
 | 3745 | fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | 
 | 3746 | { | 
 | 3747 | 	/* | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3748 | 	 * Only siblings can have significantly less than SCHED_POWER_SCALE | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3749 | 	 */ | 
| Peter Zijlstra | a6c75f2 | 2011-04-07 14:09:52 +0200 | [diff] [blame] | 3750 | 	if (!(sd->flags & SD_SHARE_CPUPOWER)) | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3751 | 		return 0; | 
 | 3752 |  | 
 | 3753 | 	/* | 
 | 3754 | 	 * If ~90% of the cpu_power is still there, we're good. | 
 | 3755 | 	 */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3756 | 	if (group->sgp->power * 32 > group->sgp->power_orig * 29) | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3757 | 		return 1; | 
 | 3758 |  | 
 | 3759 | 	return 0; | 
 | 3760 | } | 
 | 3761 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3762 | /** | 
 | 3763 |  * update_sg_lb_stats - Update sched_group's statistics for load balancing. | 
 | 3764 |  * @sd: The sched_domain whose statistics are to be updated. | 
 | 3765 |  * @group: sched_group whose statistics are to be updated. | 
 | 3766 |  * @this_cpu: Cpu for which load balance is currently performed. | 
 | 3767 |  * @idle: Idle status of this_cpu | 
 | 3768 |  * @load_idx: Load index of sched_domain of this_cpu for load calc. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3769 |  * @local_group: Does group contain this_cpu. | 
 | 3770 |  * @cpus: Set of cpus considered for load balancing. | 
 | 3771 |  * @balance: Should we balance. | 
 | 3772 |  * @sgs: variable to hold the statistics for this group. | 
 | 3773 |  */ | 
 | 3774 | static inline void update_sg_lb_stats(struct sched_domain *sd, | 
 | 3775 | 			struct sched_group *group, int this_cpu, | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3776 | 			enum cpu_idle_type idle, int load_idx, | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3777 | 			int local_group, const struct cpumask *cpus, | 
 | 3778 | 			int *balance, struct sg_lb_stats *sgs) | 
 | 3779 | { | 
| Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 3780 | 	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3781 | 	int i; | 
 | 3782 | 	unsigned int balance_cpu = -1, first_idle_cpu = 0; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3783 | 	unsigned long avg_load_per_task = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3784 |  | 
| Gautham R Shenoy | 871e35b | 2010-01-20 14:02:44 -0600 | [diff] [blame] | 3785 | 	if (local_group) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3786 | 		balance_cpu = group_first_cpu(group); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3787 |  | 
 | 3788 | 	/* Tally up the load of all CPUs in the group */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3789 | 	max_cpu_load = 0; | 
 | 3790 | 	min_cpu_load = ~0UL; | 
| Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 3791 | 	max_nr_running = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3792 |  | 
 | 3793 | 	for_each_cpu_and(i, sched_group_cpus(group), cpus) { | 
 | 3794 | 		struct rq *rq = cpu_rq(i); | 
 | 3795 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3796 | 		/* Bias balancing toward cpus of our domain */ | 
 | 3797 | 		if (local_group) { | 
 | 3798 | 			if (idle_cpu(i) && !first_idle_cpu) { | 
 | 3799 | 				first_idle_cpu = 1; | 
 | 3800 | 				balance_cpu = i; | 
 | 3801 | 			} | 
 | 3802 |  | 
 | 3803 | 			load = target_load(i, load_idx); | 
 | 3804 | 		} else { | 
 | 3805 | 			load = source_load(i, load_idx); | 
| Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 3806 | 			if (load > max_cpu_load) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3807 | 				max_cpu_load = load; | 
| Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 3808 | 				max_nr_running = rq->nr_running; | 
 | 3809 | 			} | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3810 | 			if (min_cpu_load > load) | 
 | 3811 | 				min_cpu_load = load; | 
 | 3812 | 		} | 
 | 3813 |  | 
 | 3814 | 		sgs->group_load += load; | 
 | 3815 | 		sgs->sum_nr_running += rq->nr_running; | 
 | 3816 | 		sgs->sum_weighted_load += weighted_cpuload(i); | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3817 | 		if (idle_cpu(i)) | 
 | 3818 | 			sgs->idle_cpus++; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3819 | 	} | 
 | 3820 |  | 
 | 3821 | 	/* | 
 | 3822 | 	 * First idle cpu or the first cpu(busiest) in this sched group | 
 | 3823 | 	 * is eligible for doing load balancing at this and above | 
 | 3824 | 	 * domains. In the newly idle case, we will allow all the cpu's | 
 | 3825 | 	 * to do the newly idle load balance. | 
 | 3826 | 	 */ | 
| Vincent Guittot | 4ec4412 | 2011-12-12 20:21:08 +0100 | [diff] [blame] | 3827 | 	if (local_group) { | 
 | 3828 | 		if (idle != CPU_NEWLY_IDLE) { | 
 | 3829 | 			if (balance_cpu != this_cpu) { | 
 | 3830 | 				*balance = 0; | 
 | 3831 | 				return; | 
 | 3832 | 			} | 
 | 3833 | 			update_group_power(sd, this_cpu); | 
 | 3834 | 		} else if (time_after_eq(jiffies, group->sgp->next_update)) | 
 | 3835 | 			update_group_power(sd, this_cpu); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3836 | 	} | 
 | 3837 |  | 
 | 3838 | 	/* Adjust by relative CPU power of the group */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3839 | 	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3840 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3841 | 	/* | 
 | 3842 | 	 * Consider the group unbalanced when the imbalance is larger | 
| Peter Zijlstra | 866ab43 | 2011-02-21 18:56:47 +0100 | [diff] [blame] | 3843 | 	 * than the average weight of a task. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3844 | 	 * | 
 | 3845 | 	 * APZ: with cgroup the avg task weight can vary wildly and | 
 | 3846 | 	 *      might not be a suitable number - should we keep a | 
 | 3847 | 	 *      normalized nr_running number somewhere that negates | 
 | 3848 | 	 *      the hierarchy? | 
 | 3849 | 	 */ | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3850 | 	if (sgs->sum_nr_running) | 
 | 3851 | 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3852 |  | 
| Peter Zijlstra | 866ab43 | 2011-02-21 18:56:47 +0100 | [diff] [blame] | 3853 | 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3854 | 		sgs->group_imb = 1; | 
 | 3855 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3856 | 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power, | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3857 | 						SCHED_POWER_SCALE); | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3858 | 	if (!sgs->group_capacity) | 
 | 3859 | 		sgs->group_capacity = fix_small_capacity(sd, group); | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3860 | 	sgs->group_weight = group->group_weight; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3861 |  | 
 | 3862 | 	if (sgs->group_capacity > sgs->sum_nr_running) | 
 | 3863 | 		sgs->group_has_capacity = 1; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3864 | } | 
 | 3865 |  | 
 | 3866 | /** | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3867 |  * update_sd_pick_busiest - return 1 on busiest group | 
 | 3868 |  * @sd: sched_domain whose statistics are to be checked | 
 | 3869 |  * @sds: sched_domain statistics | 
 | 3870 |  * @sg: sched_group candidate to be checked for being the busiest | 
| Michael Neuling | b6b1229 | 2010-06-10 12:06:21 +1000 | [diff] [blame] | 3871 |  * @sgs: sched_group statistics | 
 | 3872 |  * @this_cpu: the current cpu | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3873 |  * | 
 | 3874 |  * Determine if @sg is a busier group than the previously selected | 
 | 3875 |  * busiest group. | 
 | 3876 |  */ | 
 | 3877 | static bool update_sd_pick_busiest(struct sched_domain *sd, | 
 | 3878 | 				   struct sd_lb_stats *sds, | 
 | 3879 | 				   struct sched_group *sg, | 
 | 3880 | 				   struct sg_lb_stats *sgs, | 
 | 3881 | 				   int this_cpu) | 
 | 3882 | { | 
 | 3883 | 	if (sgs->avg_load <= sds->max_load) | 
 | 3884 | 		return false; | 
 | 3885 |  | 
 | 3886 | 	if (sgs->sum_nr_running > sgs->group_capacity) | 
 | 3887 | 		return true; | 
 | 3888 |  | 
 | 3889 | 	if (sgs->group_imb) | 
 | 3890 | 		return true; | 
 | 3891 |  | 
 | 3892 | 	/* | 
 | 3893 | 	 * ASYM_PACKING needs to move all the work to the lowest | 
 | 3894 | 	 * numbered CPUs in the group, therefore mark all groups | 
 | 3895 | 	 * higher than ourself as busy. | 
 | 3896 | 	 */ | 
 | 3897 | 	if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running && | 
 | 3898 | 	    this_cpu < group_first_cpu(sg)) { | 
 | 3899 | 		if (!sds->busiest) | 
 | 3900 | 			return true; | 
 | 3901 |  | 
 | 3902 | 		if (group_first_cpu(sds->busiest) > group_first_cpu(sg)) | 
 | 3903 | 			return true; | 
 | 3904 | 	} | 
 | 3905 |  | 
 | 3906 | 	return false; | 
 | 3907 | } | 
 | 3908 |  | 
 | 3909 | /** | 
| Hui Kang | 461819a | 2011-10-11 23:00:59 -0400 | [diff] [blame] | 3910 |  * update_sd_lb_stats - Update sched_domain's statistics for load balancing. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3911 |  * @sd: sched_domain whose statistics are to be updated. | 
 | 3912 |  * @this_cpu: Cpu for which load balance is currently performed. | 
 | 3913 |  * @idle: Idle status of this_cpu | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3914 |  * @cpus: Set of cpus considered for load balancing. | 
 | 3915 |  * @balance: Should we balance. | 
 | 3916 |  * @sds: variable to hold the statistics for this sched_domain. | 
 | 3917 |  */ | 
 | 3918 | static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3919 | 			enum cpu_idle_type idle, const struct cpumask *cpus, | 
 | 3920 | 			int *balance, struct sd_lb_stats *sds) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3921 | { | 
 | 3922 | 	struct sched_domain *child = sd->child; | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3923 | 	struct sched_group *sg = sd->groups; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3924 | 	struct sg_lb_stats sgs; | 
 | 3925 | 	int load_idx, prefer_sibling = 0; | 
 | 3926 |  | 
 | 3927 | 	if (child && child->flags & SD_PREFER_SIBLING) | 
 | 3928 | 		prefer_sibling = 1; | 
 | 3929 |  | 
 | 3930 | 	init_sd_power_savings_stats(sd, sds, idle); | 
 | 3931 | 	load_idx = get_sd_load_idx(sd, idle); | 
 | 3932 |  | 
 | 3933 | 	do { | 
 | 3934 | 		int local_group; | 
 | 3935 |  | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3936 | 		local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg)); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3937 | 		memset(&sgs, 0, sizeof(sgs)); | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3938 | 		update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3939 | 				local_group, cpus, balance, &sgs); | 
 | 3940 |  | 
| Peter Zijlstra | 8f190fb | 2009-12-24 14:18:21 +0100 | [diff] [blame] | 3941 | 		if (local_group && !(*balance)) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3942 | 			return; | 
 | 3943 |  | 
 | 3944 | 		sds->total_load += sgs.group_load; | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3945 | 		sds->total_pwr += sg->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3946 |  | 
 | 3947 | 		/* | 
 | 3948 | 		 * In case the child domain prefers tasks go to siblings | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3949 | 		 * first, lower the sg capacity to one so that we'll try | 
| Nikhil Rao | 75dd321 | 2010-10-15 13:12:30 -0700 | [diff] [blame] | 3950 | 		 * and move all the excess tasks away. We lower the capacity | 
 | 3951 | 		 * of a group only if the local group has the capacity to fit | 
 | 3952 | 		 * these excess tasks, i.e. nr_running < group_capacity. The | 
 | 3953 | 		 * extra check prevents the case where you always pull from the | 
 | 3954 | 		 * heaviest group when it is already under-utilized (possible | 
 | 3955 | 		 * with a large weight task outweighs the tasks on the system). | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3956 | 		 */ | 
| Nikhil Rao | 75dd321 | 2010-10-15 13:12:30 -0700 | [diff] [blame] | 3957 | 		if (prefer_sibling && !local_group && sds->this_has_capacity) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3958 | 			sgs.group_capacity = min(sgs.group_capacity, 1UL); | 
 | 3959 |  | 
 | 3960 | 		if (local_group) { | 
 | 3961 | 			sds->this_load = sgs.avg_load; | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3962 | 			sds->this = sg; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3963 | 			sds->this_nr_running = sgs.sum_nr_running; | 
 | 3964 | 			sds->this_load_per_task = sgs.sum_weighted_load; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3965 | 			sds->this_has_capacity = sgs.group_has_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3966 | 			sds->this_idle_cpus = sgs.idle_cpus; | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3967 | 		} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3968 | 			sds->max_load = sgs.avg_load; | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3969 | 			sds->busiest = sg; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3970 | 			sds->busiest_nr_running = sgs.sum_nr_running; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3971 | 			sds->busiest_idle_cpus = sgs.idle_cpus; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3972 | 			sds->busiest_group_capacity = sgs.group_capacity; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3973 | 			sds->busiest_load_per_task = sgs.sum_weighted_load; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3974 | 			sds->busiest_has_capacity = sgs.group_has_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3975 | 			sds->busiest_group_weight = sgs.group_weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3976 | 			sds->group_imb = sgs.group_imb; | 
 | 3977 | 		} | 
 | 3978 |  | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3979 | 		update_sd_power_savings_stats(sg, sds, local_group, &sgs); | 
 | 3980 | 		sg = sg->next; | 
 | 3981 | 	} while (sg != sd->groups); | 
 | 3982 | } | 
 | 3983 |  | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3984 | /** | 
 | 3985 |  * check_asym_packing - Check to see if the group is packed into the | 
 | 3986 |  *			sched doman. | 
 | 3987 |  * | 
 | 3988 |  * This is primarily intended to used at the sibling level.  Some | 
 | 3989 |  * cores like POWER7 prefer to use lower numbered SMT threads.  In the | 
 | 3990 |  * case of POWER7, it can move to lower SMT modes only when higher | 
 | 3991 |  * threads are idle.  When in lower SMT modes, the threads will | 
 | 3992 |  * perform better since they share less core resources.  Hence when we | 
 | 3993 |  * have idle threads, we want them to be the higher ones. | 
 | 3994 |  * | 
 | 3995 |  * This packing function is run on idle threads.  It checks to see if | 
 | 3996 |  * the busiest CPU in this domain (core in the P7 case) has a higher | 
 | 3997 |  * CPU number than the packing function is being run on.  Here we are | 
 | 3998 |  * assuming lower CPU number will be equivalent to lower a SMT thread | 
 | 3999 |  * number. | 
 | 4000 |  * | 
| Michael Neuling | b6b1229 | 2010-06-10 12:06:21 +1000 | [diff] [blame] | 4001 |  * Returns 1 when packing is required and a task should be moved to | 
 | 4002 |  * this CPU.  The amount of the imbalance is returned in *imbalance. | 
 | 4003 |  * | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4004 |  * @sd: The sched_domain whose packing is to be checked. | 
 | 4005 |  * @sds: Statistics of the sched_domain which is to be packed | 
 | 4006 |  * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | 
 | 4007 |  * @imbalance: returns amount of imbalanced due to packing. | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4008 |  */ | 
 | 4009 | static int check_asym_packing(struct sched_domain *sd, | 
 | 4010 | 			      struct sd_lb_stats *sds, | 
 | 4011 | 			      int this_cpu, unsigned long *imbalance) | 
 | 4012 | { | 
 | 4013 | 	int busiest_cpu; | 
 | 4014 |  | 
 | 4015 | 	if (!(sd->flags & SD_ASYM_PACKING)) | 
 | 4016 | 		return 0; | 
 | 4017 |  | 
 | 4018 | 	if (!sds->busiest) | 
 | 4019 | 		return 0; | 
 | 4020 |  | 
 | 4021 | 	busiest_cpu = group_first_cpu(sds->busiest); | 
 | 4022 | 	if (this_cpu > busiest_cpu) | 
 | 4023 | 		return 0; | 
 | 4024 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4025 | 	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power, | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4026 | 				       SCHED_POWER_SCALE); | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4027 | 	return 1; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4028 | } | 
 | 4029 |  | 
 | 4030 | /** | 
 | 4031 |  * fix_small_imbalance - Calculate the minor imbalance that exists | 
 | 4032 |  *			amongst the groups of a sched_domain, during | 
 | 4033 |  *			load balancing. | 
 | 4034 |  * @sds: Statistics of the sched_domain whose imbalance is to be calculated. | 
 | 4035 |  * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | 
 | 4036 |  * @imbalance: Variable to store the imbalance. | 
 | 4037 |  */ | 
 | 4038 | static inline void fix_small_imbalance(struct sd_lb_stats *sds, | 
 | 4039 | 				int this_cpu, unsigned long *imbalance) | 
 | 4040 | { | 
 | 4041 | 	unsigned long tmp, pwr_now = 0, pwr_move = 0; | 
 | 4042 | 	unsigned int imbn = 2; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4043 | 	unsigned long scaled_busy_load_per_task; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4044 |  | 
 | 4045 | 	if (sds->this_nr_running) { | 
 | 4046 | 		sds->this_load_per_task /= sds->this_nr_running; | 
 | 4047 | 		if (sds->busiest_load_per_task > | 
 | 4048 | 				sds->this_load_per_task) | 
 | 4049 | 			imbn = 1; | 
 | 4050 | 	} else | 
 | 4051 | 		sds->this_load_per_task = | 
 | 4052 | 			cpu_avg_load_per_task(this_cpu); | 
 | 4053 |  | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4054 | 	scaled_busy_load_per_task = sds->busiest_load_per_task | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4055 | 					 * SCHED_POWER_SCALE; | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4056 | 	scaled_busy_load_per_task /= sds->busiest->sgp->power; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4057 |  | 
 | 4058 | 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >= | 
 | 4059 | 			(scaled_busy_load_per_task * imbn)) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4060 | 		*imbalance = sds->busiest_load_per_task; | 
 | 4061 | 		return; | 
 | 4062 | 	} | 
 | 4063 |  | 
 | 4064 | 	/* | 
 | 4065 | 	 * OK, we don't have enough imbalance to justify moving tasks, | 
 | 4066 | 	 * however we may be able to increase total CPU power used by | 
 | 4067 | 	 * moving them. | 
 | 4068 | 	 */ | 
 | 4069 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4070 | 	pwr_now += sds->busiest->sgp->power * | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4071 | 			min(sds->busiest_load_per_task, sds->max_load); | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4072 | 	pwr_now += sds->this->sgp->power * | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4073 | 			min(sds->this_load_per_task, sds->this_load); | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4074 | 	pwr_now /= SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4075 |  | 
 | 4076 | 	/* Amount of load we'd subtract */ | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4077 | 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4078 | 		sds->busiest->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4079 | 	if (sds->max_load > tmp) | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4080 | 		pwr_move += sds->busiest->sgp->power * | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4081 | 			min(sds->busiest_load_per_task, sds->max_load - tmp); | 
 | 4082 |  | 
 | 4083 | 	/* Amount of load we'd add */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4084 | 	if (sds->max_load * sds->busiest->sgp->power < | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4085 | 		sds->busiest_load_per_task * SCHED_POWER_SCALE) | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4086 | 		tmp = (sds->max_load * sds->busiest->sgp->power) / | 
 | 4087 | 			sds->this->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4088 | 	else | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4089 | 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4090 | 			sds->this->sgp->power; | 
 | 4091 | 	pwr_move += sds->this->sgp->power * | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4092 | 			min(sds->this_load_per_task, sds->this_load + tmp); | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4093 | 	pwr_move /= SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4094 |  | 
 | 4095 | 	/* Move if we gain throughput */ | 
 | 4096 | 	if (pwr_move > pwr_now) | 
 | 4097 | 		*imbalance = sds->busiest_load_per_task; | 
 | 4098 | } | 
 | 4099 |  | 
 | 4100 | /** | 
 | 4101 |  * calculate_imbalance - Calculate the amount of imbalance present within the | 
 | 4102 |  *			 groups of a given sched_domain during load balance. | 
 | 4103 |  * @sds: statistics of the sched_domain whose imbalance is to be calculated. | 
 | 4104 |  * @this_cpu: Cpu for which currently load balance is being performed. | 
 | 4105 |  * @imbalance: The variable to store the imbalance. | 
 | 4106 |  */ | 
 | 4107 | static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, | 
 | 4108 | 		unsigned long *imbalance) | 
 | 4109 | { | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4110 | 	unsigned long max_pull, load_above_capacity = ~0UL; | 
 | 4111 |  | 
 | 4112 | 	sds->busiest_load_per_task /= sds->busiest_nr_running; | 
 | 4113 | 	if (sds->group_imb) { | 
 | 4114 | 		sds->busiest_load_per_task = | 
 | 4115 | 			min(sds->busiest_load_per_task, sds->avg_load); | 
 | 4116 | 	} | 
 | 4117 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4118 | 	/* | 
 | 4119 | 	 * In the presence of smp nice balancing, certain scenarios can have | 
 | 4120 | 	 * max load less than avg load(as we skip the groups at or below | 
 | 4121 | 	 * its cpu_power, while calculating max_load..) | 
 | 4122 | 	 */ | 
 | 4123 | 	if (sds->max_load < sds->avg_load) { | 
 | 4124 | 		*imbalance = 0; | 
 | 4125 | 		return fix_small_imbalance(sds, this_cpu, imbalance); | 
 | 4126 | 	} | 
 | 4127 |  | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4128 | 	if (!sds->group_imb) { | 
 | 4129 | 		/* | 
 | 4130 | 		 * Don't want to pull so many tasks that a group would go idle. | 
 | 4131 | 		 */ | 
 | 4132 | 		load_above_capacity = (sds->busiest_nr_running - | 
 | 4133 | 						sds->busiest_group_capacity); | 
 | 4134 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4135 | 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE); | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4136 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4137 | 		load_above_capacity /= sds->busiest->sgp->power; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4138 | 	} | 
 | 4139 |  | 
 | 4140 | 	/* | 
 | 4141 | 	 * We're trying to get all the cpus to the average_load, so we don't | 
 | 4142 | 	 * want to push ourselves above the average load, nor do we wish to | 
 | 4143 | 	 * reduce the max loaded cpu below the average load. At the same time, | 
 | 4144 | 	 * we also don't want to reduce the group load below the group capacity | 
 | 4145 | 	 * (so that we can implement power-savings policies etc). Thus we look | 
 | 4146 | 	 * for the minimum possible imbalance. | 
 | 4147 | 	 * Be careful of negative numbers as they'll appear as very large values | 
 | 4148 | 	 * with unsigned longs. | 
 | 4149 | 	 */ | 
 | 4150 | 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4151 |  | 
 | 4152 | 	/* How much load to actually move to equalise the imbalance */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4153 | 	*imbalance = min(max_pull * sds->busiest->sgp->power, | 
 | 4154 | 		(sds->avg_load - sds->this_load) * sds->this->sgp->power) | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4155 | 			/ SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4156 |  | 
 | 4157 | 	/* | 
 | 4158 | 	 * if *imbalance is less than the average load per runnable task | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 4159 | 	 * there is no guarantee that any tasks will be moved so we'll have | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4160 | 	 * a think about bumping its value to force at least one task to be | 
 | 4161 | 	 * moved | 
 | 4162 | 	 */ | 
 | 4163 | 	if (*imbalance < sds->busiest_load_per_task) | 
 | 4164 | 		return fix_small_imbalance(sds, this_cpu, imbalance); | 
 | 4165 |  | 
 | 4166 | } | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4167 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4168 | /******* find_busiest_group() helpers end here *********************/ | 
 | 4169 |  | 
 | 4170 | /** | 
 | 4171 |  * find_busiest_group - Returns the busiest group within the sched_domain | 
 | 4172 |  * if there is an imbalance. If there isn't an imbalance, and | 
 | 4173 |  * the user has opted for power-savings, it returns a group whose | 
 | 4174 |  * CPUs can be put to idle by rebalancing those tasks elsewhere, if | 
 | 4175 |  * such a group exists. | 
 | 4176 |  * | 
 | 4177 |  * Also calculates the amount of weighted load which should be moved | 
 | 4178 |  * to restore balance. | 
 | 4179 |  * | 
 | 4180 |  * @sd: The sched_domain whose busiest group is to be returned. | 
 | 4181 |  * @this_cpu: The cpu for which load balancing is currently being performed. | 
 | 4182 |  * @imbalance: Variable which stores amount of weighted load which should | 
 | 4183 |  *		be moved to restore balance/put a group to idle. | 
 | 4184 |  * @idle: The idle status of this_cpu. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4185 |  * @cpus: The set of CPUs under consideration for load-balancing. | 
 | 4186 |  * @balance: Pointer to a variable indicating if this_cpu | 
 | 4187 |  *	is the appropriate cpu to perform load balancing at this_level. | 
 | 4188 |  * | 
 | 4189 |  * Returns:	- the busiest group if imbalance exists. | 
 | 4190 |  *		- If no imbalance and user has opted for power-savings balance, | 
 | 4191 |  *		   return the least loaded group whose CPUs can be | 
 | 4192 |  *		   put to idle by rebalancing its tasks onto our group. | 
 | 4193 |  */ | 
 | 4194 | static struct sched_group * | 
 | 4195 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 
 | 4196 | 		   unsigned long *imbalance, enum cpu_idle_type idle, | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 4197 | 		   const struct cpumask *cpus, int *balance) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4198 | { | 
 | 4199 | 	struct sd_lb_stats sds; | 
 | 4200 |  | 
 | 4201 | 	memset(&sds, 0, sizeof(sds)); | 
 | 4202 |  | 
 | 4203 | 	/* | 
 | 4204 | 	 * Compute the various statistics relavent for load balancing at | 
 | 4205 | 	 * this level. | 
 | 4206 | 	 */ | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 4207 | 	update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4208 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4209 | 	/* | 
 | 4210 | 	 * this_cpu is not the appropriate cpu to perform load balancing at | 
 | 4211 | 	 * this level. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4212 | 	 */ | 
| Peter Zijlstra | 8f190fb | 2009-12-24 14:18:21 +0100 | [diff] [blame] | 4213 | 	if (!(*balance)) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4214 | 		goto ret; | 
 | 4215 |  | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4216 | 	if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) && | 
 | 4217 | 	    check_asym_packing(sd, &sds, this_cpu, imbalance)) | 
 | 4218 | 		return sds.busiest; | 
 | 4219 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4220 | 	/* There is no busy sibling group to pull tasks from */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4221 | 	if (!sds.busiest || sds.busiest_nr_running == 0) | 
 | 4222 | 		goto out_balanced; | 
 | 4223 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4224 | 	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr; | 
| Ken Chen | b0432d8 | 2011-04-07 17:23:22 -0700 | [diff] [blame] | 4225 |  | 
| Peter Zijlstra | 866ab43 | 2011-02-21 18:56:47 +0100 | [diff] [blame] | 4226 | 	/* | 
 | 4227 | 	 * If the busiest group is imbalanced the below checks don't | 
 | 4228 | 	 * work because they assumes all things are equal, which typically | 
 | 4229 | 	 * isn't true due to cpus_allowed constraints and the like. | 
 | 4230 | 	 */ | 
 | 4231 | 	if (sds.group_imb) | 
 | 4232 | 		goto force_balance; | 
 | 4233 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4234 | 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4235 | 	if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity && | 
 | 4236 | 			!sds.busiest_has_capacity) | 
 | 4237 | 		goto force_balance; | 
 | 4238 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4239 | 	/* | 
 | 4240 | 	 * If the local group is more busy than the selected busiest group | 
 | 4241 | 	 * don't try and pull any tasks. | 
 | 4242 | 	 */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4243 | 	if (sds.this_load >= sds.max_load) | 
 | 4244 | 		goto out_balanced; | 
 | 4245 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4246 | 	/* | 
 | 4247 | 	 * Don't pull any tasks if this group is already above the domain | 
 | 4248 | 	 * average load. | 
 | 4249 | 	 */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4250 | 	if (sds.this_load >= sds.avg_load) | 
 | 4251 | 		goto out_balanced; | 
 | 4252 |  | 
| Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 4253 | 	if (idle == CPU_IDLE) { | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4254 | 		/* | 
 | 4255 | 		 * This cpu is idle. If the busiest group load doesn't | 
 | 4256 | 		 * have more tasks than the number of available cpu's and | 
 | 4257 | 		 * there is no imbalance between this and busiest group | 
 | 4258 | 		 * wrt to idle cpu's, it is balanced. | 
 | 4259 | 		 */ | 
| Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 4260 | 		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) && | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4261 | 		    sds.busiest_nr_running <= sds.busiest_group_weight) | 
 | 4262 | 			goto out_balanced; | 
| Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 4263 | 	} else { | 
 | 4264 | 		/* | 
 | 4265 | 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use | 
 | 4266 | 		 * imbalance_pct to be conservative. | 
 | 4267 | 		 */ | 
 | 4268 | 		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) | 
 | 4269 | 			goto out_balanced; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4270 | 	} | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4271 |  | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4272 | force_balance: | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4273 | 	/* Looks like there is an imbalance. Compute it */ | 
 | 4274 | 	calculate_imbalance(&sds, this_cpu, imbalance); | 
 | 4275 | 	return sds.busiest; | 
 | 4276 |  | 
 | 4277 | out_balanced: | 
 | 4278 | 	/* | 
 | 4279 | 	 * There is no obvious imbalance. But check if we can do some balancing | 
 | 4280 | 	 * to save power. | 
 | 4281 | 	 */ | 
 | 4282 | 	if (check_power_save_busiest_group(&sds, this_cpu, imbalance)) | 
 | 4283 | 		return sds.busiest; | 
 | 4284 | ret: | 
 | 4285 | 	*imbalance = 0; | 
 | 4286 | 	return NULL; | 
 | 4287 | } | 
 | 4288 |  | 
 | 4289 | /* | 
 | 4290 |  * find_busiest_queue - find the busiest runqueue among the cpus in group. | 
 | 4291 |  */ | 
 | 4292 | static struct rq * | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4293 | find_busiest_queue(struct sched_domain *sd, struct sched_group *group, | 
 | 4294 | 		   enum cpu_idle_type idle, unsigned long imbalance, | 
 | 4295 | 		   const struct cpumask *cpus) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4296 | { | 
 | 4297 | 	struct rq *busiest = NULL, *rq; | 
 | 4298 | 	unsigned long max_load = 0; | 
 | 4299 | 	int i; | 
 | 4300 |  | 
 | 4301 | 	for_each_cpu(i, sched_group_cpus(group)) { | 
 | 4302 | 		unsigned long power = power_of(i); | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4303 | 		unsigned long capacity = DIV_ROUND_CLOSEST(power, | 
 | 4304 | 							   SCHED_POWER_SCALE); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4305 | 		unsigned long wl; | 
 | 4306 |  | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4307 | 		if (!capacity) | 
 | 4308 | 			capacity = fix_small_capacity(sd, group); | 
 | 4309 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4310 | 		if (!cpumask_test_cpu(i, cpus)) | 
 | 4311 | 			continue; | 
 | 4312 |  | 
 | 4313 | 		rq = cpu_rq(i); | 
| Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 4314 | 		wl = weighted_cpuload(i); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4315 |  | 
| Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 4316 | 		/* | 
 | 4317 | 		 * When comparing with imbalance, use weighted_cpuload() | 
 | 4318 | 		 * which is not scaled with the cpu power. | 
 | 4319 | 		 */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4320 | 		if (capacity && rq->nr_running == 1 && wl > imbalance) | 
 | 4321 | 			continue; | 
 | 4322 |  | 
| Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 4323 | 		/* | 
 | 4324 | 		 * For the load comparisons with the other cpu's, consider | 
 | 4325 | 		 * the weighted_cpuload() scaled with the cpu power, so that | 
 | 4326 | 		 * the load can be moved away from the cpu that is potentially | 
 | 4327 | 		 * running at a lower capacity. | 
 | 4328 | 		 */ | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4329 | 		wl = (wl * SCHED_POWER_SCALE) / power; | 
| Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 4330 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4331 | 		if (wl > max_load) { | 
 | 4332 | 			max_load = wl; | 
 | 4333 | 			busiest = rq; | 
 | 4334 | 		} | 
 | 4335 | 	} | 
 | 4336 |  | 
 | 4337 | 	return busiest; | 
 | 4338 | } | 
 | 4339 |  | 
 | 4340 | /* | 
 | 4341 |  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but | 
 | 4342 |  * so long as it is large enough. | 
 | 4343 |  */ | 
 | 4344 | #define MAX_PINNED_INTERVAL	512 | 
 | 4345 |  | 
 | 4346 | /* Working cpumask for load_balance and load_balance_newidle. */ | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 4347 | DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4348 |  | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 4349 | static int need_active_balance(struct sched_domain *sd, int idle, | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4350 | 			       int busiest_cpu, int this_cpu) | 
| Peter Zijlstra | 1af3ed3 | 2009-12-23 15:10:31 +0100 | [diff] [blame] | 4351 | { | 
 | 4352 | 	if (idle == CPU_NEWLY_IDLE) { | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4353 |  | 
 | 4354 | 		/* | 
 | 4355 | 		 * ASYM_PACKING needs to force migrate tasks from busy but | 
 | 4356 | 		 * higher numbered CPUs in order to pack all tasks in the | 
 | 4357 | 		 * lowest numbered CPUs. | 
 | 4358 | 		 */ | 
 | 4359 | 		if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu) | 
 | 4360 | 			return 1; | 
 | 4361 |  | 
| Peter Zijlstra | 1af3ed3 | 2009-12-23 15:10:31 +0100 | [diff] [blame] | 4362 | 		/* | 
 | 4363 | 		 * The only task running in a non-idle cpu can be moved to this | 
 | 4364 | 		 * cpu in an attempt to completely freeup the other CPU | 
 | 4365 | 		 * package. | 
 | 4366 | 		 * | 
 | 4367 | 		 * The package power saving logic comes from | 
 | 4368 | 		 * find_busiest_group(). If there are no imbalance, then | 
 | 4369 | 		 * f_b_g() will return NULL. However when sched_mc={1,2} then | 
 | 4370 | 		 * f_b_g() will select a group from which a running task may be | 
 | 4371 | 		 * pulled to this cpu in order to make the other package idle. | 
 | 4372 | 		 * If there is no opportunity to make a package idle and if | 
 | 4373 | 		 * there are no imbalance, then f_b_g() will return NULL and no | 
 | 4374 | 		 * action will be taken in load_balance_newidle(). | 
 | 4375 | 		 * | 
 | 4376 | 		 * Under normal task pull operation due to imbalance, there | 
 | 4377 | 		 * will be more than one task in the source run queue and | 
 | 4378 | 		 * move_tasks() will succeed.  ld_moved will be true and this | 
 | 4379 | 		 * active balance code will not be triggered. | 
 | 4380 | 		 */ | 
| Peter Zijlstra | 1af3ed3 | 2009-12-23 15:10:31 +0100 | [diff] [blame] | 4381 | 		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) | 
 | 4382 | 			return 0; | 
 | 4383 | 	} | 
 | 4384 |  | 
 | 4385 | 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); | 
 | 4386 | } | 
 | 4387 |  | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4388 | static int active_load_balance_cpu_stop(void *data); | 
 | 4389 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4390 | /* | 
 | 4391 |  * Check this_cpu to ensure it is balanced within domain. Attempt to move | 
 | 4392 |  * tasks if there is an imbalance. | 
 | 4393 |  */ | 
 | 4394 | static int load_balance(int this_cpu, struct rq *this_rq, | 
 | 4395 | 			struct sched_domain *sd, enum cpu_idle_type idle, | 
 | 4396 | 			int *balance) | 
 | 4397 | { | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4398 | 	int ld_moved, active_balance = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4399 | 	struct sched_group *group; | 
 | 4400 | 	unsigned long imbalance; | 
 | 4401 | 	struct rq *busiest; | 
 | 4402 | 	unsigned long flags; | 
 | 4403 | 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | 
 | 4404 |  | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4405 | 	struct lb_env env = { | 
 | 4406 | 		.sd		= sd, | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 4407 | 		.dst_cpu	= this_cpu, | 
 | 4408 | 		.dst_rq		= this_rq, | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4409 | 		.idle		= idle, | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 4410 | 		.loop_break	= sysctl_sched_nr_migrate, | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4411 | 	}; | 
 | 4412 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4413 | 	cpumask_copy(cpus, cpu_active_mask); | 
 | 4414 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4415 | 	schedstat_inc(sd, lb_count[idle]); | 
 | 4416 |  | 
 | 4417 | redo: | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 4418 | 	group = find_busiest_group(sd, this_cpu, &imbalance, idle, | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4419 | 				   cpus, balance); | 
 | 4420 |  | 
 | 4421 | 	if (*balance == 0) | 
 | 4422 | 		goto out_balanced; | 
 | 4423 |  | 
 | 4424 | 	if (!group) { | 
 | 4425 | 		schedstat_inc(sd, lb_nobusyg[idle]); | 
 | 4426 | 		goto out_balanced; | 
 | 4427 | 	} | 
 | 4428 |  | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4429 | 	busiest = find_busiest_queue(sd, group, idle, imbalance, cpus); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4430 | 	if (!busiest) { | 
 | 4431 | 		schedstat_inc(sd, lb_nobusyq[idle]); | 
 | 4432 | 		goto out_balanced; | 
 | 4433 | 	} | 
 | 4434 |  | 
 | 4435 | 	BUG_ON(busiest == this_rq); | 
 | 4436 |  | 
 | 4437 | 	schedstat_add(sd, lb_imbalance[idle], imbalance); | 
 | 4438 |  | 
 | 4439 | 	ld_moved = 0; | 
 | 4440 | 	if (busiest->nr_running > 1) { | 
 | 4441 | 		/* | 
 | 4442 | 		 * Attempt to move tasks. If find_busiest_group has found | 
 | 4443 | 		 * an imbalance but busiest->nr_running <= 1, the group is | 
 | 4444 | 		 * still unbalanced. ld_moved simply stays zero, so it is | 
 | 4445 | 		 * correctly treated as an imbalance. | 
 | 4446 | 		 */ | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4447 | 		env.flags |= LBF_ALL_PINNED; | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 4448 | 		env.load_move = imbalance; | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 4449 | 		env.src_cpu = busiest->cpu; | 
 | 4450 | 		env.src_rq = busiest; | 
| Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 4451 | 		env.loop_max = busiest->nr_running; | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4452 |  | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 4453 | more_balance: | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4454 | 		local_irq_save(flags); | 
 | 4455 | 		double_rq_lock(this_rq, busiest); | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 4456 | 		if (!env.loop) | 
 | 4457 | 			update_h_load(env.src_cpu); | 
 | 4458 | 		ld_moved += move_tasks(&env); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4459 | 		double_rq_unlock(this_rq, busiest); | 
 | 4460 | 		local_irq_restore(flags); | 
 | 4461 |  | 
| Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 4462 | 		if (env.flags & LBF_NEED_BREAK) { | 
 | 4463 | 			env.flags &= ~LBF_NEED_BREAK; | 
 | 4464 | 			goto more_balance; | 
 | 4465 | 		} | 
 | 4466 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4467 | 		/* | 
 | 4468 | 		 * some other cpu did the load balance for us. | 
 | 4469 | 		 */ | 
 | 4470 | 		if (ld_moved && this_cpu != smp_processor_id()) | 
 | 4471 | 			resched_cpu(this_cpu); | 
 | 4472 |  | 
 | 4473 | 		/* All tasks on this runqueue were pinned by CPU affinity */ | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4474 | 		if (unlikely(env.flags & LBF_ALL_PINNED)) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4475 | 			cpumask_clear_cpu(cpu_of(busiest), cpus); | 
 | 4476 | 			if (!cpumask_empty(cpus)) | 
 | 4477 | 				goto redo; | 
 | 4478 | 			goto out_balanced; | 
 | 4479 | 		} | 
 | 4480 | 	} | 
 | 4481 |  | 
 | 4482 | 	if (!ld_moved) { | 
 | 4483 | 		schedstat_inc(sd, lb_failed[idle]); | 
| Venkatesh Pallipadi | 58b26c4 | 2010-09-10 18:19:17 -0700 | [diff] [blame] | 4484 | 		/* | 
 | 4485 | 		 * Increment the failure counter only on periodic balance. | 
 | 4486 | 		 * We do not want newidle balance, which can be very | 
 | 4487 | 		 * frequent, pollute the failure counter causing | 
 | 4488 | 		 * excessive cache_hot migrations and active balances. | 
 | 4489 | 		 */ | 
 | 4490 | 		if (idle != CPU_NEWLY_IDLE) | 
 | 4491 | 			sd->nr_balance_failed++; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4492 |  | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 4493 | 		if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4494 | 			raw_spin_lock_irqsave(&busiest->lock, flags); | 
 | 4495 |  | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4496 | 			/* don't kick the active_load_balance_cpu_stop, | 
 | 4497 | 			 * if the curr task on busiest cpu can't be | 
 | 4498 | 			 * moved to this_cpu | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4499 | 			 */ | 
 | 4500 | 			if (!cpumask_test_cpu(this_cpu, | 
| Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 4501 | 					tsk_cpus_allowed(busiest->curr))) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4502 | 				raw_spin_unlock_irqrestore(&busiest->lock, | 
 | 4503 | 							    flags); | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4504 | 				env.flags |= LBF_ALL_PINNED; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4505 | 				goto out_one_pinned; | 
 | 4506 | 			} | 
 | 4507 |  | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4508 | 			/* | 
 | 4509 | 			 * ->active_balance synchronizes accesses to | 
 | 4510 | 			 * ->active_balance_work.  Once set, it's cleared | 
 | 4511 | 			 * only after active load balance is finished. | 
 | 4512 | 			 */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4513 | 			if (!busiest->active_balance) { | 
 | 4514 | 				busiest->active_balance = 1; | 
 | 4515 | 				busiest->push_cpu = this_cpu; | 
 | 4516 | 				active_balance = 1; | 
 | 4517 | 			} | 
 | 4518 | 			raw_spin_unlock_irqrestore(&busiest->lock, flags); | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4519 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4520 | 			if (active_balance) | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4521 | 				stop_one_cpu_nowait(cpu_of(busiest), | 
 | 4522 | 					active_load_balance_cpu_stop, busiest, | 
 | 4523 | 					&busiest->active_balance_work); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4524 |  | 
 | 4525 | 			/* | 
 | 4526 | 			 * We've kicked active balancing, reset the failure | 
 | 4527 | 			 * counter. | 
 | 4528 | 			 */ | 
 | 4529 | 			sd->nr_balance_failed = sd->cache_nice_tries+1; | 
 | 4530 | 		} | 
 | 4531 | 	} else | 
 | 4532 | 		sd->nr_balance_failed = 0; | 
 | 4533 |  | 
 | 4534 | 	if (likely(!active_balance)) { | 
 | 4535 | 		/* We were unbalanced, so reset the balancing interval */ | 
 | 4536 | 		sd->balance_interval = sd->min_interval; | 
 | 4537 | 	} else { | 
 | 4538 | 		/* | 
 | 4539 | 		 * If we've begun active balancing, start to back off. This | 
 | 4540 | 		 * case may not be covered by the all_pinned logic if there | 
 | 4541 | 		 * is only 1 task on the busy runqueue (because we don't call | 
 | 4542 | 		 * move_tasks). | 
 | 4543 | 		 */ | 
 | 4544 | 		if (sd->balance_interval < sd->max_interval) | 
 | 4545 | 			sd->balance_interval *= 2; | 
 | 4546 | 	} | 
 | 4547 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4548 | 	goto out; | 
 | 4549 |  | 
 | 4550 | out_balanced: | 
 | 4551 | 	schedstat_inc(sd, lb_balanced[idle]); | 
 | 4552 |  | 
 | 4553 | 	sd->nr_balance_failed = 0; | 
 | 4554 |  | 
 | 4555 | out_one_pinned: | 
 | 4556 | 	/* tune up the balancing interval */ | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4557 | 	if (((env.flags & LBF_ALL_PINNED) && | 
| Peter Zijlstra | 5b54b56 | 2011-09-22 15:23:13 +0200 | [diff] [blame] | 4558 | 			sd->balance_interval < MAX_PINNED_INTERVAL) || | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4559 | 			(sd->balance_interval < sd->max_interval)) | 
 | 4560 | 		sd->balance_interval *= 2; | 
 | 4561 |  | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 4562 | 	ld_moved = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4563 | out: | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4564 | 	return ld_moved; | 
 | 4565 | } | 
 | 4566 |  | 
 | 4567 | /* | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4568 |  * idle_balance is called by schedule() if this_cpu is about to become | 
 | 4569 |  * idle. Attempts to pull tasks from other CPUs. | 
 | 4570 |  */ | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 4571 | void idle_balance(int this_cpu, struct rq *this_rq) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4572 | { | 
 | 4573 | 	struct sched_domain *sd; | 
 | 4574 | 	int pulled_task = 0; | 
 | 4575 | 	unsigned long next_balance = jiffies + HZ; | 
 | 4576 |  | 
 | 4577 | 	this_rq->idle_stamp = this_rq->clock; | 
 | 4578 |  | 
 | 4579 | 	if (this_rq->avg_idle < sysctl_sched_migration_cost) | 
 | 4580 | 		return; | 
 | 4581 |  | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4582 | 	/* | 
 | 4583 | 	 * Drop the rq->lock, but keep IRQ/preempt disabled. | 
 | 4584 | 	 */ | 
 | 4585 | 	raw_spin_unlock(&this_rq->lock); | 
 | 4586 |  | 
| Paul Turner | c66eaf6 | 2010-11-15 15:47:07 -0800 | [diff] [blame] | 4587 | 	update_shares(this_cpu); | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4588 | 	rcu_read_lock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4589 | 	for_each_domain(this_cpu, sd) { | 
 | 4590 | 		unsigned long interval; | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4591 | 		int balance = 1; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4592 |  | 
 | 4593 | 		if (!(sd->flags & SD_LOAD_BALANCE)) | 
 | 4594 | 			continue; | 
 | 4595 |  | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4596 | 		if (sd->flags & SD_BALANCE_NEWIDLE) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4597 | 			/* If we've pulled tasks over stop searching: */ | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4598 | 			pulled_task = load_balance(this_cpu, this_rq, | 
 | 4599 | 						   sd, CPU_NEWLY_IDLE, &balance); | 
 | 4600 | 		} | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4601 |  | 
 | 4602 | 		interval = msecs_to_jiffies(sd->balance_interval); | 
 | 4603 | 		if (time_after(next_balance, sd->last_balance + interval)) | 
 | 4604 | 			next_balance = sd->last_balance + interval; | 
| Nikhil Rao | d5ad140 | 2010-11-17 11:42:04 -0800 | [diff] [blame] | 4605 | 		if (pulled_task) { | 
 | 4606 | 			this_rq->idle_stamp = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4607 | 			break; | 
| Nikhil Rao | d5ad140 | 2010-11-17 11:42:04 -0800 | [diff] [blame] | 4608 | 		} | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4609 | 	} | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4610 | 	rcu_read_unlock(); | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4611 |  | 
 | 4612 | 	raw_spin_lock(&this_rq->lock); | 
 | 4613 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4614 | 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) { | 
 | 4615 | 		/* | 
 | 4616 | 		 * We are going idle. next_balance may be set based on | 
 | 4617 | 		 * a busy processor. So reset next_balance. | 
 | 4618 | 		 */ | 
 | 4619 | 		this_rq->next_balance = next_balance; | 
 | 4620 | 	} | 
 | 4621 | } | 
 | 4622 |  | 
 | 4623 | /* | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4624 |  * active_load_balance_cpu_stop is run by cpu stopper. It pushes | 
 | 4625 |  * running tasks off the busiest CPU onto idle CPUs. It requires at | 
 | 4626 |  * least 1 task to be running on each physical CPU where possible, and | 
 | 4627 |  * avoids physical / logical imbalances. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4628 |  */ | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4629 | static int active_load_balance_cpu_stop(void *data) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4630 | { | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4631 | 	struct rq *busiest_rq = data; | 
 | 4632 | 	int busiest_cpu = cpu_of(busiest_rq); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4633 | 	int target_cpu = busiest_rq->push_cpu; | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4634 | 	struct rq *target_rq = cpu_rq(target_cpu); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4635 | 	struct sched_domain *sd; | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4636 |  | 
 | 4637 | 	raw_spin_lock_irq(&busiest_rq->lock); | 
 | 4638 |  | 
 | 4639 | 	/* make sure the requested cpu hasn't gone down in the meantime */ | 
 | 4640 | 	if (unlikely(busiest_cpu != smp_processor_id() || | 
 | 4641 | 		     !busiest_rq->active_balance)) | 
 | 4642 | 		goto out_unlock; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4643 |  | 
 | 4644 | 	/* Is there any task to move? */ | 
 | 4645 | 	if (busiest_rq->nr_running <= 1) | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4646 | 		goto out_unlock; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4647 |  | 
 | 4648 | 	/* | 
 | 4649 | 	 * This condition is "impossible", if it occurs | 
 | 4650 | 	 * we need to fix it. Originally reported by | 
 | 4651 | 	 * Bjorn Helgaas on a 128-cpu setup. | 
 | 4652 | 	 */ | 
 | 4653 | 	BUG_ON(busiest_rq == target_rq); | 
 | 4654 |  | 
 | 4655 | 	/* move a task from busiest_rq to target_rq */ | 
 | 4656 | 	double_lock_balance(busiest_rq, target_rq); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4657 |  | 
 | 4658 | 	/* Search for an sd spanning us and the target CPU. */ | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4659 | 	rcu_read_lock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4660 | 	for_each_domain(target_cpu, sd) { | 
 | 4661 | 		if ((sd->flags & SD_LOAD_BALANCE) && | 
 | 4662 | 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) | 
 | 4663 | 				break; | 
 | 4664 | 	} | 
 | 4665 |  | 
 | 4666 | 	if (likely(sd)) { | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4667 | 		struct lb_env env = { | 
 | 4668 | 			.sd		= sd, | 
| Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 4669 | 			.dst_cpu	= target_cpu, | 
 | 4670 | 			.dst_rq		= target_rq, | 
 | 4671 | 			.src_cpu	= busiest_rq->cpu, | 
 | 4672 | 			.src_rq		= busiest_rq, | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4673 | 			.idle		= CPU_IDLE, | 
 | 4674 | 		}; | 
 | 4675 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4676 | 		schedstat_inc(sd, alb_count); | 
 | 4677 |  | 
| Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4678 | 		if (move_one_task(&env)) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4679 | 			schedstat_inc(sd, alb_pushed); | 
 | 4680 | 		else | 
 | 4681 | 			schedstat_inc(sd, alb_failed); | 
 | 4682 | 	} | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4683 | 	rcu_read_unlock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4684 | 	double_unlock_balance(busiest_rq, target_rq); | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4685 | out_unlock: | 
 | 4686 | 	busiest_rq->active_balance = 0; | 
 | 4687 | 	raw_spin_unlock_irq(&busiest_rq->lock); | 
 | 4688 | 	return 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4689 | } | 
 | 4690 |  | 
 | 4691 | #ifdef CONFIG_NO_HZ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4692 | /* | 
 | 4693 |  * idle load balancing details | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4694 |  * - When one of the busy CPUs notice that there may be an idle rebalancing | 
 | 4695 |  *   needed, they will kick the idle load balancer, which then does idle | 
 | 4696 |  *   load balancing for all the idle CPUs. | 
 | 4697 |  */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4698 | static struct { | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4699 | 	cpumask_var_t idle_cpus_mask; | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4700 | 	atomic_t nr_cpus; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4701 | 	unsigned long next_balance;     /* in jiffy units */ | 
 | 4702 | } nohz ____cacheline_aligned; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4703 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4704 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 
 | 4705 | /** | 
 | 4706 |  * lowest_flag_domain - Return lowest sched_domain containing flag. | 
 | 4707 |  * @cpu:	The cpu whose lowest level of sched domain is to | 
 | 4708 |  *		be returned. | 
 | 4709 |  * @flag:	The flag to check for the lowest sched_domain | 
 | 4710 |  *		for the given cpu. | 
 | 4711 |  * | 
 | 4712 |  * Returns the lowest sched_domain of a cpu which contains the given flag. | 
 | 4713 |  */ | 
 | 4714 | static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) | 
 | 4715 | { | 
 | 4716 | 	struct sched_domain *sd; | 
 | 4717 |  | 
 | 4718 | 	for_each_domain(cpu, sd) | 
| Hillf Danton | 0835471 | 2011-06-16 21:55:19 -0400 | [diff] [blame] | 4719 | 		if (sd->flags & flag) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4720 | 			break; | 
 | 4721 |  | 
 | 4722 | 	return sd; | 
 | 4723 | } | 
 | 4724 |  | 
 | 4725 | /** | 
 | 4726 |  * for_each_flag_domain - Iterates over sched_domains containing the flag. | 
 | 4727 |  * @cpu:	The cpu whose domains we're iterating over. | 
 | 4728 |  * @sd:		variable holding the value of the power_savings_sd | 
 | 4729 |  *		for cpu. | 
 | 4730 |  * @flag:	The flag to filter the sched_domains to be iterated. | 
 | 4731 |  * | 
 | 4732 |  * Iterates over all the scheduler domains for a given cpu that has the 'flag' | 
 | 4733 |  * set, starting from the lowest sched_domain to the highest. | 
 | 4734 |  */ | 
 | 4735 | #define for_each_flag_domain(cpu, sd, flag) \ | 
 | 4736 | 	for (sd = lowest_flag_domain(cpu, flag); \ | 
 | 4737 | 		(sd && (sd->flags & flag)); sd = sd->parent) | 
 | 4738 |  | 
 | 4739 | /** | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4740 |  * find_new_ilb - Finds the optimum idle load balancer for nomination. | 
 | 4741 |  * @cpu:	The cpu which is nominating a new idle_load_balancer. | 
 | 4742 |  * | 
 | 4743 |  * Returns:	Returns the id of the idle load balancer if it exists, | 
 | 4744 |  *		Else, returns >= nr_cpu_ids. | 
 | 4745 |  * | 
 | 4746 |  * This algorithm picks the idle load balancer such that it belongs to a | 
 | 4747 |  * semi-idle powersavings sched_domain. The idea is to try and avoid | 
 | 4748 |  * completely idle packages/cores just for the purpose of idle load balancing | 
 | 4749 |  * when there are other idle cpu's which are better suited for that job. | 
 | 4750 |  */ | 
 | 4751 | static int find_new_ilb(int cpu) | 
 | 4752 | { | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4753 | 	int ilb = cpumask_first(nohz.idle_cpus_mask); | 
| Suresh Siddha | 786d6dc | 2011-12-01 17:07:35 -0800 | [diff] [blame] | 4754 | 	struct sched_group *ilbg; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4755 | 	struct sched_domain *sd; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4756 |  | 
 | 4757 | 	/* | 
 | 4758 | 	 * Have idle load balancer selection from semi-idle packages only | 
 | 4759 | 	 * when power-aware load balancing is enabled | 
 | 4760 | 	 */ | 
 | 4761 | 	if (!(sched_smt_power_savings || sched_mc_power_savings)) | 
 | 4762 | 		goto out_done; | 
 | 4763 |  | 
 | 4764 | 	/* | 
 | 4765 | 	 * Optimize for the case when we have no idle CPUs or only one | 
 | 4766 | 	 * idle CPU. Don't walk the sched_domain hierarchy in such cases | 
 | 4767 | 	 */ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4768 | 	if (cpumask_weight(nohz.idle_cpus_mask) < 2) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4769 | 		goto out_done; | 
 | 4770 |  | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4771 | 	rcu_read_lock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4772 | 	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { | 
| Suresh Siddha | 786d6dc | 2011-12-01 17:07:35 -0800 | [diff] [blame] | 4773 | 		ilbg = sd->groups; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4774 |  | 
 | 4775 | 		do { | 
| Suresh Siddha | 786d6dc | 2011-12-01 17:07:35 -0800 | [diff] [blame] | 4776 | 			if (ilbg->group_weight != | 
 | 4777 | 				atomic_read(&ilbg->sgp->nr_busy_cpus)) { | 
 | 4778 | 				ilb = cpumask_first_and(nohz.idle_cpus_mask, | 
 | 4779 | 							sched_group_cpus(ilbg)); | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4780 | 				goto unlock; | 
 | 4781 | 			} | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4782 |  | 
| Suresh Siddha | 786d6dc | 2011-12-01 17:07:35 -0800 | [diff] [blame] | 4783 | 			ilbg = ilbg->next; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4784 |  | 
| Suresh Siddha | 786d6dc | 2011-12-01 17:07:35 -0800 | [diff] [blame] | 4785 | 		} while (ilbg != sd->groups); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4786 | 	} | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4787 | unlock: | 
 | 4788 | 	rcu_read_unlock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4789 |  | 
 | 4790 | out_done: | 
| Suresh Siddha | 786d6dc | 2011-12-01 17:07:35 -0800 | [diff] [blame] | 4791 | 	if (ilb < nr_cpu_ids && idle_cpu(ilb)) | 
 | 4792 | 		return ilb; | 
 | 4793 |  | 
 | 4794 | 	return nr_cpu_ids; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4795 | } | 
 | 4796 | #else /*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ | 
 | 4797 | static inline int find_new_ilb(int call_cpu) | 
 | 4798 | { | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4799 | 	return nr_cpu_ids; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4800 | } | 
 | 4801 | #endif | 
 | 4802 |  | 
 | 4803 | /* | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4804 |  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the | 
 | 4805 |  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle | 
 | 4806 |  * CPU (if there is one). | 
 | 4807 |  */ | 
 | 4808 | static void nohz_balancer_kick(int cpu) | 
 | 4809 | { | 
 | 4810 | 	int ilb_cpu; | 
 | 4811 |  | 
 | 4812 | 	nohz.next_balance++; | 
 | 4813 |  | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4814 | 	ilb_cpu = find_new_ilb(cpu); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4815 |  | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4816 | 	if (ilb_cpu >= nr_cpu_ids) | 
 | 4817 | 		return; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4818 |  | 
| Suresh Siddha | cd490c5 | 2011-12-06 11:26:34 -0800 | [diff] [blame] | 4819 | 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu))) | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 4820 | 		return; | 
 | 4821 | 	/* | 
 | 4822 | 	 * Use smp_send_reschedule() instead of resched_cpu(). | 
 | 4823 | 	 * This way we generate a sched IPI on the target cpu which | 
 | 4824 | 	 * is idle. And the softirq performing nohz idle load balance | 
 | 4825 | 	 * will be run before returning from the IPI. | 
 | 4826 | 	 */ | 
 | 4827 | 	smp_send_reschedule(ilb_cpu); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4828 | 	return; | 
 | 4829 | } | 
 | 4830 |  | 
| Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 4831 | static inline void clear_nohz_tick_stopped(int cpu) | 
 | 4832 | { | 
 | 4833 | 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { | 
 | 4834 | 		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); | 
 | 4835 | 		atomic_dec(&nohz.nr_cpus); | 
 | 4836 | 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); | 
 | 4837 | 	} | 
 | 4838 | } | 
 | 4839 |  | 
| Suresh Siddha | 69e1e81 | 2011-12-01 17:07:33 -0800 | [diff] [blame] | 4840 | static inline void set_cpu_sd_state_busy(void) | 
 | 4841 | { | 
 | 4842 | 	struct sched_domain *sd; | 
 | 4843 | 	int cpu = smp_processor_id(); | 
 | 4844 |  | 
 | 4845 | 	if (!test_bit(NOHZ_IDLE, nohz_flags(cpu))) | 
 | 4846 | 		return; | 
 | 4847 | 	clear_bit(NOHZ_IDLE, nohz_flags(cpu)); | 
 | 4848 |  | 
 | 4849 | 	rcu_read_lock(); | 
 | 4850 | 	for_each_domain(cpu, sd) | 
 | 4851 | 		atomic_inc(&sd->groups->sgp->nr_busy_cpus); | 
 | 4852 | 	rcu_read_unlock(); | 
 | 4853 | } | 
 | 4854 |  | 
 | 4855 | void set_cpu_sd_state_idle(void) | 
 | 4856 | { | 
 | 4857 | 	struct sched_domain *sd; | 
 | 4858 | 	int cpu = smp_processor_id(); | 
 | 4859 |  | 
 | 4860 | 	if (test_bit(NOHZ_IDLE, nohz_flags(cpu))) | 
 | 4861 | 		return; | 
 | 4862 | 	set_bit(NOHZ_IDLE, nohz_flags(cpu)); | 
 | 4863 |  | 
 | 4864 | 	rcu_read_lock(); | 
 | 4865 | 	for_each_domain(cpu, sd) | 
 | 4866 | 		atomic_dec(&sd->groups->sgp->nr_busy_cpus); | 
 | 4867 | 	rcu_read_unlock(); | 
 | 4868 | } | 
 | 4869 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4870 | /* | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4871 |  * This routine will record that this cpu is going idle with tick stopped. | 
 | 4872 |  * This info will be used in performing idle load balancing in the future. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4873 |  */ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4874 | void select_nohz_load_balancer(int stop_tick) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4875 | { | 
 | 4876 | 	int cpu = smp_processor_id(); | 
 | 4877 |  | 
| Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 4878 | 	/* | 
 | 4879 | 	 * If this cpu is going down, then nothing needs to be done. | 
 | 4880 | 	 */ | 
 | 4881 | 	if (!cpu_active(cpu)) | 
 | 4882 | 		return; | 
 | 4883 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4884 | 	if (stop_tick) { | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4885 | 		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4886 | 			return; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4887 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4888 | 		cpumask_set_cpu(cpu, nohz.idle_cpus_mask); | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4889 | 		atomic_inc(&nohz.nr_cpus); | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 4890 | 		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4891 | 	} | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4892 | 	return; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4893 | } | 
| Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 4894 |  | 
 | 4895 | static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, | 
 | 4896 | 					unsigned long action, void *hcpu) | 
 | 4897 | { | 
 | 4898 | 	switch (action & ~CPU_TASKS_FROZEN) { | 
 | 4899 | 	case CPU_DYING: | 
 | 4900 | 		clear_nohz_tick_stopped(smp_processor_id()); | 
 | 4901 | 		return NOTIFY_OK; | 
 | 4902 | 	default: | 
 | 4903 | 		return NOTIFY_DONE; | 
 | 4904 | 	} | 
 | 4905 | } | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4906 | #endif | 
 | 4907 |  | 
 | 4908 | static DEFINE_SPINLOCK(balancing); | 
 | 4909 |  | 
| Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 4910 | /* | 
 | 4911 |  * Scale the max load_balance interval with the number of CPUs in the system. | 
 | 4912 |  * This trades load-balance latency on larger machines for less cross talk. | 
 | 4913 |  */ | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 4914 | void update_max_interval(void) | 
| Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 4915 | { | 
 | 4916 | 	max_load_balance_interval = HZ*num_online_cpus()/10; | 
 | 4917 | } | 
 | 4918 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4919 | /* | 
 | 4920 |  * It checks each scheduling domain to see if it is due to be balanced, | 
 | 4921 |  * and initiates a balancing operation if so. | 
 | 4922 |  * | 
 | 4923 |  * Balancing parameters are set up in arch_init_sched_domains. | 
 | 4924 |  */ | 
 | 4925 | static void rebalance_domains(int cpu, enum cpu_idle_type idle) | 
 | 4926 | { | 
 | 4927 | 	int balance = 1; | 
 | 4928 | 	struct rq *rq = cpu_rq(cpu); | 
 | 4929 | 	unsigned long interval; | 
 | 4930 | 	struct sched_domain *sd; | 
 | 4931 | 	/* Earliest time when we have to do rebalance again */ | 
 | 4932 | 	unsigned long next_balance = jiffies + 60*HZ; | 
 | 4933 | 	int update_next_balance = 0; | 
 | 4934 | 	int need_serialize; | 
 | 4935 |  | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 4936 | 	update_shares(cpu); | 
 | 4937 |  | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4938 | 	rcu_read_lock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4939 | 	for_each_domain(cpu, sd) { | 
 | 4940 | 		if (!(sd->flags & SD_LOAD_BALANCE)) | 
 | 4941 | 			continue; | 
 | 4942 |  | 
 | 4943 | 		interval = sd->balance_interval; | 
 | 4944 | 		if (idle != CPU_IDLE) | 
 | 4945 | 			interval *= sd->busy_factor; | 
 | 4946 |  | 
 | 4947 | 		/* scale ms to jiffies */ | 
 | 4948 | 		interval = msecs_to_jiffies(interval); | 
| Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 4949 | 		interval = clamp(interval, 1UL, max_load_balance_interval); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4950 |  | 
 | 4951 | 		need_serialize = sd->flags & SD_SERIALIZE; | 
 | 4952 |  | 
 | 4953 | 		if (need_serialize) { | 
 | 4954 | 			if (!spin_trylock(&balancing)) | 
 | 4955 | 				goto out; | 
 | 4956 | 		} | 
 | 4957 |  | 
 | 4958 | 		if (time_after_eq(jiffies, sd->last_balance + interval)) { | 
 | 4959 | 			if (load_balance(cpu, rq, sd, idle, &balance)) { | 
 | 4960 | 				/* | 
 | 4961 | 				 * We've pulled tasks over so either we're no | 
| Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 4962 | 				 * longer idle. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4963 | 				 */ | 
 | 4964 | 				idle = CPU_NOT_IDLE; | 
 | 4965 | 			} | 
 | 4966 | 			sd->last_balance = jiffies; | 
 | 4967 | 		} | 
 | 4968 | 		if (need_serialize) | 
 | 4969 | 			spin_unlock(&balancing); | 
 | 4970 | out: | 
 | 4971 | 		if (time_after(next_balance, sd->last_balance + interval)) { | 
 | 4972 | 			next_balance = sd->last_balance + interval; | 
 | 4973 | 			update_next_balance = 1; | 
 | 4974 | 		} | 
 | 4975 |  | 
 | 4976 | 		/* | 
 | 4977 | 		 * Stop the load balance at this level. There is another | 
 | 4978 | 		 * CPU in our sched group which is doing load balancing more | 
 | 4979 | 		 * actively. | 
 | 4980 | 		 */ | 
 | 4981 | 		if (!balance) | 
 | 4982 | 			break; | 
 | 4983 | 	} | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4984 | 	rcu_read_unlock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4985 |  | 
 | 4986 | 	/* | 
 | 4987 | 	 * next_balance will be updated only when there is a need. | 
 | 4988 | 	 * When the cpu is attached to null domain for ex, it will not be | 
 | 4989 | 	 * updated. | 
 | 4990 | 	 */ | 
 | 4991 | 	if (likely(update_next_balance)) | 
 | 4992 | 		rq->next_balance = next_balance; | 
 | 4993 | } | 
 | 4994 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4995 | #ifdef CONFIG_NO_HZ | 
 | 4996 | /* | 
 | 4997 |  * In CONFIG_NO_HZ case, the idle balance kickee will do the | 
 | 4998 |  * rebalancing for all the cpus for whom scheduler ticks are stopped. | 
 | 4999 |  */ | 
 | 5000 | static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) | 
 | 5001 | { | 
 | 5002 | 	struct rq *this_rq = cpu_rq(this_cpu); | 
 | 5003 | 	struct rq *rq; | 
 | 5004 | 	int balance_cpu; | 
 | 5005 |  | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5006 | 	if (idle != CPU_IDLE || | 
 | 5007 | 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) | 
 | 5008 | 		goto end; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5009 |  | 
 | 5010 | 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { | 
| Suresh Siddha | 8a6d42d | 2011-12-06 11:19:37 -0800 | [diff] [blame] | 5011 | 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5012 | 			continue; | 
 | 5013 |  | 
 | 5014 | 		/* | 
 | 5015 | 		 * If this cpu gets work to do, stop the load balancing | 
 | 5016 | 		 * work being done for other cpus. Next load | 
 | 5017 | 		 * balancing owner will pick it up. | 
 | 5018 | 		 */ | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5019 | 		if (need_resched()) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5020 | 			break; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5021 |  | 
 | 5022 | 		raw_spin_lock_irq(&this_rq->lock); | 
| Suresh Siddha | 5343bdb | 2010-07-09 15:19:54 +0200 | [diff] [blame] | 5023 | 		update_rq_clock(this_rq); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5024 | 		update_cpu_load(this_rq); | 
 | 5025 | 		raw_spin_unlock_irq(&this_rq->lock); | 
 | 5026 |  | 
 | 5027 | 		rebalance_domains(balance_cpu, CPU_IDLE); | 
 | 5028 |  | 
 | 5029 | 		rq = cpu_rq(balance_cpu); | 
 | 5030 | 		if (time_after(this_rq->next_balance, rq->next_balance)) | 
 | 5031 | 			this_rq->next_balance = rq->next_balance; | 
 | 5032 | 	} | 
 | 5033 | 	nohz.next_balance = this_rq->next_balance; | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5034 | end: | 
 | 5035 | 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5036 | } | 
 | 5037 |  | 
 | 5038 | /* | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5039 |  * Current heuristic for kicking the idle load balancer in the presence | 
 | 5040 |  * of an idle cpu is the system. | 
 | 5041 |  *   - This rq has more than one task. | 
 | 5042 |  *   - At any scheduler domain level, this cpu's scheduler group has multiple | 
 | 5043 |  *     busy cpu's exceeding the group's power. | 
 | 5044 |  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler | 
 | 5045 |  *     domain span are idle. | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5046 |  */ | 
 | 5047 | static inline int nohz_kick_needed(struct rq *rq, int cpu) | 
 | 5048 | { | 
 | 5049 | 	unsigned long now = jiffies; | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5050 | 	struct sched_domain *sd; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5051 |  | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5052 | 	if (unlikely(idle_cpu(cpu))) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5053 | 		return 0; | 
 | 5054 |  | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5055 |        /* | 
 | 5056 | 	* We may be recently in ticked or tickless idle mode. At the first | 
 | 5057 | 	* busy tick after returning from idle, we will update the busy stats. | 
 | 5058 | 	*/ | 
| Suresh Siddha | 69e1e81 | 2011-12-01 17:07:33 -0800 | [diff] [blame] | 5059 | 	set_cpu_sd_state_busy(); | 
| Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 5060 | 	clear_nohz_tick_stopped(cpu); | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5061 |  | 
 | 5062 | 	/* | 
 | 5063 | 	 * None are in tickless mode and hence no need for NOHZ idle load | 
 | 5064 | 	 * balancing. | 
 | 5065 | 	 */ | 
 | 5066 | 	if (likely(!atomic_read(&nohz.nr_cpus))) | 
 | 5067 | 		return 0; | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5068 |  | 
 | 5069 | 	if (time_before(now, nohz.next_balance)) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5070 | 		return 0; | 
 | 5071 |  | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5072 | 	if (rq->nr_running >= 2) | 
 | 5073 | 		goto need_kick; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5074 |  | 
| Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5075 | 	rcu_read_lock(); | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5076 | 	for_each_domain(cpu, sd) { | 
 | 5077 | 		struct sched_group *sg = sd->groups; | 
 | 5078 | 		struct sched_group_power *sgp = sg->sgp; | 
 | 5079 | 		int nr_busy = atomic_read(&sgp->nr_busy_cpus); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5080 |  | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5081 | 		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1) | 
| Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5082 | 			goto need_kick_unlock; | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5083 |  | 
 | 5084 | 		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight | 
 | 5085 | 		    && (cpumask_first_and(nohz.idle_cpus_mask, | 
 | 5086 | 					  sched_domain_span(sd)) < cpu)) | 
| Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5087 | 			goto need_kick_unlock; | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5088 |  | 
 | 5089 | 		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING))) | 
 | 5090 | 			break; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5091 | 	} | 
| Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5092 | 	rcu_read_unlock(); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5093 | 	return 0; | 
| Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5094 |  | 
 | 5095 | need_kick_unlock: | 
 | 5096 | 	rcu_read_unlock(); | 
| Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5097 | need_kick: | 
 | 5098 | 	return 1; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5099 | } | 
 | 5100 | #else | 
 | 5101 | static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } | 
 | 5102 | #endif | 
 | 5103 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5104 | /* | 
 | 5105 |  * run_rebalance_domains is triggered when needed from the scheduler tick. | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5106 |  * Also triggered for nohz idle balancing (with nohz_balancing_kick set). | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5107 |  */ | 
 | 5108 | static void run_rebalance_domains(struct softirq_action *h) | 
 | 5109 | { | 
 | 5110 | 	int this_cpu = smp_processor_id(); | 
 | 5111 | 	struct rq *this_rq = cpu_rq(this_cpu); | 
| Suresh Siddha | 6eb57e0 | 2011-10-03 15:09:01 -0700 | [diff] [blame] | 5112 | 	enum cpu_idle_type idle = this_rq->idle_balance ? | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5113 | 						CPU_IDLE : CPU_NOT_IDLE; | 
 | 5114 |  | 
 | 5115 | 	rebalance_domains(this_cpu, idle); | 
 | 5116 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5117 | 	/* | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5118 | 	 * If this cpu has a pending nohz_balance_kick, then do the | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5119 | 	 * balancing on behalf of the other idle cpus whose ticks are | 
 | 5120 | 	 * stopped. | 
 | 5121 | 	 */ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5122 | 	nohz_idle_balance(this_cpu, idle); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5123 | } | 
 | 5124 |  | 
 | 5125 | static inline int on_null_domain(int cpu) | 
 | 5126 | { | 
| Paul E. McKenney | 90a6501 | 2010-02-28 08:32:18 -0800 | [diff] [blame] | 5127 | 	return !rcu_dereference_sched(cpu_rq(cpu)->sd); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5128 | } | 
 | 5129 |  | 
 | 5130 | /* | 
 | 5131 |  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5132 |  */ | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5133 | void trigger_load_balance(struct rq *rq, int cpu) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5134 | { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5135 | 	/* Don't need to rebalance while attached to NULL domain */ | 
 | 5136 | 	if (time_after_eq(jiffies, rq->next_balance) && | 
 | 5137 | 	    likely(!on_null_domain(cpu))) | 
 | 5138 | 		raise_softirq(SCHED_SOFTIRQ); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5139 | #ifdef CONFIG_NO_HZ | 
| Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5140 | 	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu))) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5141 | 		nohz_balancer_kick(cpu); | 
 | 5142 | #endif | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5143 | } | 
 | 5144 |  | 
| Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5145 | static void rq_online_fair(struct rq *rq) | 
 | 5146 | { | 
 | 5147 | 	update_sysctl(); | 
 | 5148 | } | 
 | 5149 |  | 
 | 5150 | static void rq_offline_fair(struct rq *rq) | 
 | 5151 | { | 
 | 5152 | 	update_sysctl(); | 
 | 5153 | } | 
 | 5154 |  | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 5155 | #endif /* CONFIG_SMP */ | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 5156 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5157 | /* | 
 | 5158 |  * scheduler tick hitting a task of our scheduling class: | 
 | 5159 |  */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 5160 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5161 | { | 
 | 5162 | 	struct cfs_rq *cfs_rq; | 
 | 5163 | 	struct sched_entity *se = &curr->se; | 
 | 5164 |  | 
 | 5165 | 	for_each_sched_entity(se) { | 
 | 5166 | 		cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 5167 | 		entity_tick(cfs_rq, se, queued); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5168 | 	} | 
 | 5169 | } | 
 | 5170 |  | 
 | 5171 | /* | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5172 |  * called on fork with the child task as argument from the parent's context | 
 | 5173 |  *  - child not yet on the tasklist | 
 | 5174 |  *  - preemption disabled | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5175 |  */ | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5176 | static void task_fork_fair(struct task_struct *p) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5177 | { | 
| Daisuke Nishimura | 4fc420c | 2011-12-15 14:36:55 +0900 | [diff] [blame] | 5178 | 	struct cfs_rq *cfs_rq; | 
 | 5179 | 	struct sched_entity *se = &p->se, *curr; | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5180 | 	int this_cpu = smp_processor_id(); | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5181 | 	struct rq *rq = this_rq(); | 
 | 5182 | 	unsigned long flags; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5183 |  | 
| Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5184 | 	raw_spin_lock_irqsave(&rq->lock, flags); | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5185 |  | 
| Peter Zijlstra | 861d034 | 2010-08-19 13:31:43 +0200 | [diff] [blame] | 5186 | 	update_rq_clock(rq); | 
 | 5187 |  | 
| Daisuke Nishimura | 4fc420c | 2011-12-15 14:36:55 +0900 | [diff] [blame] | 5188 | 	cfs_rq = task_cfs_rq(current); | 
 | 5189 | 	curr = cfs_rq->curr; | 
 | 5190 |  | 
| Paul E. McKenney | b0a0f66 | 2010-10-06 17:32:51 -0700 | [diff] [blame] | 5191 | 	if (unlikely(task_cpu(p) != this_cpu)) { | 
 | 5192 | 		rcu_read_lock(); | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5193 | 		__set_task_cpu(p, this_cpu); | 
| Paul E. McKenney | b0a0f66 | 2010-10-06 17:32:51 -0700 | [diff] [blame] | 5194 | 		rcu_read_unlock(); | 
 | 5195 | 	} | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5196 |  | 
| Ting Yang | 7109c44 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 5197 | 	update_curr(cfs_rq); | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5198 |  | 
| Mike Galbraith | b5d9d73 | 2009-09-08 11:12:28 +0200 | [diff] [blame] | 5199 | 	if (curr) | 
 | 5200 | 		se->vruntime = curr->vruntime; | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 5201 | 	place_entity(cfs_rq, se, 1); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 5202 |  | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5203 | 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { | 
| Dmitry Adamushko | 87fefa3 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5204 | 		/* | 
| Ingo Molnar | edcb60a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5205 | 		 * Upon rescheduling, sched_class::put_prev_task() will place | 
 | 5206 | 		 * 'current' within the tree based on its new key value. | 
 | 5207 | 		 */ | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 5208 | 		swap(curr->vruntime, se->vruntime); | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 5209 | 		resched_task(rq->curr); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 5210 | 	} | 
 | 5211 |  | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 5212 | 	se->vruntime -= cfs_rq->min_vruntime; | 
 | 5213 |  | 
| Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5214 | 	raw_spin_unlock_irqrestore(&rq->lock, flags); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5215 | } | 
 | 5216 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5217 | /* | 
 | 5218 |  * Priority of the task has changed. Check to see if we preempt | 
 | 5219 |  * the current task. | 
 | 5220 |  */ | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5221 | static void | 
 | 5222 | prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5223 | { | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5224 | 	if (!p->se.on_rq) | 
 | 5225 | 		return; | 
 | 5226 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5227 | 	/* | 
 | 5228 | 	 * Reschedule if we are currently running on this runqueue and | 
 | 5229 | 	 * our priority decreased, or if we are not currently running on | 
 | 5230 | 	 * this runqueue and our priority is higher than the current's | 
 | 5231 | 	 */ | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5232 | 	if (rq->curr == p) { | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5233 | 		if (p->prio > oldprio) | 
 | 5234 | 			resched_task(rq->curr); | 
 | 5235 | 	} else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 5236 | 		check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5237 | } | 
 | 5238 |  | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5239 | static void switched_from_fair(struct rq *rq, struct task_struct *p) | 
 | 5240 | { | 
 | 5241 | 	struct sched_entity *se = &p->se; | 
 | 5242 | 	struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
 | 5243 |  | 
 | 5244 | 	/* | 
 | 5245 | 	 * Ensure the task's vruntime is normalized, so that when its | 
 | 5246 | 	 * switched back to the fair class the enqueue_entity(.flags=0) will | 
 | 5247 | 	 * do the right thing. | 
 | 5248 | 	 * | 
 | 5249 | 	 * If it was on_rq, then the dequeue_entity(.flags=0) will already | 
 | 5250 | 	 * have normalized the vruntime, if it was !on_rq, then only when | 
 | 5251 | 	 * the task is sleeping will it still have non-normalized vruntime. | 
 | 5252 | 	 */ | 
 | 5253 | 	if (!se->on_rq && p->state != TASK_RUNNING) { | 
 | 5254 | 		/* | 
 | 5255 | 		 * Fix up our vruntime so that the current sleep doesn't | 
 | 5256 | 		 * cause 'unlimited' sleep bonus. | 
 | 5257 | 		 */ | 
 | 5258 | 		place_entity(cfs_rq, se, 0); | 
 | 5259 | 		se->vruntime -= cfs_rq->min_vruntime; | 
 | 5260 | 	} | 
 | 5261 | } | 
 | 5262 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5263 | /* | 
 | 5264 |  * We switched to the sched_fair class. | 
 | 5265 |  */ | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5266 | static void switched_to_fair(struct rq *rq, struct task_struct *p) | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5267 | { | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5268 | 	if (!p->se.on_rq) | 
 | 5269 | 		return; | 
 | 5270 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5271 | 	/* | 
 | 5272 | 	 * We were most likely switched from sched_rt, so | 
 | 5273 | 	 * kick off the schedule if running, otherwise just see | 
 | 5274 | 	 * if we can still preempt the current task. | 
 | 5275 | 	 */ | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5276 | 	if (rq->curr == p) | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5277 | 		resched_task(rq->curr); | 
 | 5278 | 	else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 5279 | 		check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5280 | } | 
 | 5281 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5282 | /* Account for a task changing its policy or group. | 
 | 5283 |  * | 
 | 5284 |  * This routine is mostly called to set cfs_rq->curr field when a task | 
 | 5285 |  * migrates between groups/classes. | 
 | 5286 |  */ | 
 | 5287 | static void set_curr_task_fair(struct rq *rq) | 
 | 5288 | { | 
 | 5289 | 	struct sched_entity *se = &rq->curr->se; | 
 | 5290 |  | 
| Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 5291 | 	for_each_sched_entity(se) { | 
 | 5292 | 		struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
 | 5293 |  | 
 | 5294 | 		set_next_entity(cfs_rq, se); | 
 | 5295 | 		/* ensure bandwidth has been allocated on our new cfs_rq */ | 
 | 5296 | 		account_cfs_rq_runtime(cfs_rq, 0); | 
 | 5297 | 	} | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5298 | } | 
 | 5299 |  | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5300 | void init_cfs_rq(struct cfs_rq *cfs_rq) | 
 | 5301 | { | 
 | 5302 | 	cfs_rq->tasks_timeline = RB_ROOT; | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5303 | 	cfs_rq->min_vruntime = (u64)(-(1LL << 20)); | 
 | 5304 | #ifndef CONFIG_64BIT | 
 | 5305 | 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; | 
 | 5306 | #endif | 
 | 5307 | } | 
 | 5308 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5309 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 5310 | static void task_move_group_fair(struct task_struct *p, int on_rq) | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5311 | { | 
| Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 5312 | 	/* | 
 | 5313 | 	 * If the task was not on the rq at the time of this cgroup movement | 
 | 5314 | 	 * it must have been asleep, sleeping tasks keep their ->vruntime | 
 | 5315 | 	 * absolute on their old rq until wakeup (needed for the fair sleeper | 
 | 5316 | 	 * bonus in place_entity()). | 
 | 5317 | 	 * | 
 | 5318 | 	 * If it was on the rq, we've just 'preempted' it, which does convert | 
 | 5319 | 	 * ->vruntime to a relative base. | 
 | 5320 | 	 * | 
 | 5321 | 	 * Make sure both cases convert their relative position when migrating | 
 | 5322 | 	 * to another cgroup's rq. This does somewhat interfere with the | 
 | 5323 | 	 * fair sleeper stuff for the first placement, but who cares. | 
 | 5324 | 	 */ | 
| Daisuke Nishimura | 7ceff01 | 2011-12-15 14:36:07 +0900 | [diff] [blame] | 5325 | 	/* | 
 | 5326 | 	 * When !on_rq, vruntime of the task has usually NOT been normalized. | 
 | 5327 | 	 * But there are some cases where it has already been normalized: | 
 | 5328 | 	 * | 
 | 5329 | 	 * - Moving a forked child which is waiting for being woken up by | 
 | 5330 | 	 *   wake_up_new_task(). | 
| Daisuke Nishimura | 62af378 | 2011-12-15 14:37:41 +0900 | [diff] [blame] | 5331 | 	 * - Moving a task which has been woken up by try_to_wake_up() and | 
 | 5332 | 	 *   waiting for actually being woken up by sched_ttwu_pending(). | 
| Daisuke Nishimura | 7ceff01 | 2011-12-15 14:36:07 +0900 | [diff] [blame] | 5333 | 	 * | 
 | 5334 | 	 * To prevent boost or penalty in the new cfs_rq caused by delta | 
 | 5335 | 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment. | 
 | 5336 | 	 */ | 
| Daisuke Nishimura | 62af378 | 2011-12-15 14:37:41 +0900 | [diff] [blame] | 5337 | 	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING)) | 
| Daisuke Nishimura | 7ceff01 | 2011-12-15 14:36:07 +0900 | [diff] [blame] | 5338 | 		on_rq = 1; | 
 | 5339 |  | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 5340 | 	if (!on_rq) | 
| Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 5341 | 		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; | 
 | 5342 | 	set_task_rq(p, task_cpu(p)); | 
 | 5343 | 	if (!on_rq) | 
 | 5344 | 		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime; | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5345 | } | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5346 |  | 
 | 5347 | void free_fair_sched_group(struct task_group *tg) | 
 | 5348 | { | 
 | 5349 | 	int i; | 
 | 5350 |  | 
 | 5351 | 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); | 
 | 5352 |  | 
 | 5353 | 	for_each_possible_cpu(i) { | 
 | 5354 | 		if (tg->cfs_rq) | 
 | 5355 | 			kfree(tg->cfs_rq[i]); | 
 | 5356 | 		if (tg->se) | 
 | 5357 | 			kfree(tg->se[i]); | 
 | 5358 | 	} | 
 | 5359 |  | 
 | 5360 | 	kfree(tg->cfs_rq); | 
 | 5361 | 	kfree(tg->se); | 
 | 5362 | } | 
 | 5363 |  | 
 | 5364 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | 
 | 5365 | { | 
 | 5366 | 	struct cfs_rq *cfs_rq; | 
 | 5367 | 	struct sched_entity *se; | 
 | 5368 | 	int i; | 
 | 5369 |  | 
 | 5370 | 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); | 
 | 5371 | 	if (!tg->cfs_rq) | 
 | 5372 | 		goto err; | 
 | 5373 | 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); | 
 | 5374 | 	if (!tg->se) | 
 | 5375 | 		goto err; | 
 | 5376 |  | 
 | 5377 | 	tg->shares = NICE_0_LOAD; | 
 | 5378 |  | 
 | 5379 | 	init_cfs_bandwidth(tg_cfs_bandwidth(tg)); | 
 | 5380 |  | 
 | 5381 | 	for_each_possible_cpu(i) { | 
 | 5382 | 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq), | 
 | 5383 | 				      GFP_KERNEL, cpu_to_node(i)); | 
 | 5384 | 		if (!cfs_rq) | 
 | 5385 | 			goto err; | 
 | 5386 |  | 
 | 5387 | 		se = kzalloc_node(sizeof(struct sched_entity), | 
 | 5388 | 				  GFP_KERNEL, cpu_to_node(i)); | 
 | 5389 | 		if (!se) | 
 | 5390 | 			goto err_free_rq; | 
 | 5391 |  | 
 | 5392 | 		init_cfs_rq(cfs_rq); | 
 | 5393 | 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); | 
 | 5394 | 	} | 
 | 5395 |  | 
 | 5396 | 	return 1; | 
 | 5397 |  | 
 | 5398 | err_free_rq: | 
 | 5399 | 	kfree(cfs_rq); | 
 | 5400 | err: | 
 | 5401 | 	return 0; | 
 | 5402 | } | 
 | 5403 |  | 
 | 5404 | void unregister_fair_sched_group(struct task_group *tg, int cpu) | 
 | 5405 | { | 
 | 5406 | 	struct rq *rq = cpu_rq(cpu); | 
 | 5407 | 	unsigned long flags; | 
 | 5408 |  | 
 | 5409 | 	/* | 
 | 5410 | 	* Only empty task groups can be destroyed; so we can speculatively | 
 | 5411 | 	* check on_list without danger of it being re-added. | 
 | 5412 | 	*/ | 
 | 5413 | 	if (!tg->cfs_rq[cpu]->on_list) | 
 | 5414 | 		return; | 
 | 5415 |  | 
 | 5416 | 	raw_spin_lock_irqsave(&rq->lock, flags); | 
 | 5417 | 	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); | 
 | 5418 | 	raw_spin_unlock_irqrestore(&rq->lock, flags); | 
 | 5419 | } | 
 | 5420 |  | 
 | 5421 | void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, | 
 | 5422 | 			struct sched_entity *se, int cpu, | 
 | 5423 | 			struct sched_entity *parent) | 
 | 5424 | { | 
 | 5425 | 	struct rq *rq = cpu_rq(cpu); | 
 | 5426 |  | 
 | 5427 | 	cfs_rq->tg = tg; | 
 | 5428 | 	cfs_rq->rq = rq; | 
 | 5429 | #ifdef CONFIG_SMP | 
 | 5430 | 	/* allow initial update_cfs_load() to truncate */ | 
 | 5431 | 	cfs_rq->load_stamp = 1; | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5432 | #endif | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5433 | 	init_cfs_rq_runtime(cfs_rq); | 
 | 5434 |  | 
 | 5435 | 	tg->cfs_rq[cpu] = cfs_rq; | 
 | 5436 | 	tg->se[cpu] = se; | 
 | 5437 |  | 
 | 5438 | 	/* se could be NULL for root_task_group */ | 
 | 5439 | 	if (!se) | 
 | 5440 | 		return; | 
 | 5441 |  | 
 | 5442 | 	if (!parent) | 
 | 5443 | 		se->cfs_rq = &rq->cfs; | 
 | 5444 | 	else | 
 | 5445 | 		se->cfs_rq = parent->my_q; | 
 | 5446 |  | 
 | 5447 | 	se->my_q = cfs_rq; | 
 | 5448 | 	update_load_set(&se->load, 0); | 
 | 5449 | 	se->parent = parent; | 
 | 5450 | } | 
 | 5451 |  | 
 | 5452 | static DEFINE_MUTEX(shares_mutex); | 
 | 5453 |  | 
 | 5454 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) | 
 | 5455 | { | 
 | 5456 | 	int i; | 
 | 5457 | 	unsigned long flags; | 
 | 5458 |  | 
 | 5459 | 	/* | 
 | 5460 | 	 * We can't change the weight of the root cgroup. | 
 | 5461 | 	 */ | 
 | 5462 | 	if (!tg->se[0]) | 
 | 5463 | 		return -EINVAL; | 
 | 5464 |  | 
 | 5465 | 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); | 
 | 5466 |  | 
 | 5467 | 	mutex_lock(&shares_mutex); | 
 | 5468 | 	if (tg->shares == shares) | 
 | 5469 | 		goto done; | 
 | 5470 |  | 
 | 5471 | 	tg->shares = shares; | 
 | 5472 | 	for_each_possible_cpu(i) { | 
 | 5473 | 		struct rq *rq = cpu_rq(i); | 
 | 5474 | 		struct sched_entity *se; | 
 | 5475 |  | 
 | 5476 | 		se = tg->se[i]; | 
 | 5477 | 		/* Propagate contribution to hierarchy */ | 
 | 5478 | 		raw_spin_lock_irqsave(&rq->lock, flags); | 
 | 5479 | 		for_each_sched_entity(se) | 
 | 5480 | 			update_cfs_shares(group_cfs_rq(se)); | 
 | 5481 | 		raw_spin_unlock_irqrestore(&rq->lock, flags); | 
 | 5482 | 	} | 
 | 5483 |  | 
 | 5484 | done: | 
 | 5485 | 	mutex_unlock(&shares_mutex); | 
 | 5486 | 	return 0; | 
 | 5487 | } | 
 | 5488 | #else /* CONFIG_FAIR_GROUP_SCHED */ | 
 | 5489 |  | 
 | 5490 | void free_fair_sched_group(struct task_group *tg) { } | 
 | 5491 |  | 
 | 5492 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | 
 | 5493 | { | 
 | 5494 | 	return 1; | 
 | 5495 | } | 
 | 5496 |  | 
 | 5497 | void unregister_fair_sched_group(struct task_group *tg, int cpu) { } | 
 | 5498 |  | 
 | 5499 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 
 | 5500 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5501 |  | 
| H Hartley Sweeten | 6d686f4 | 2010-01-13 20:21:52 -0700 | [diff] [blame] | 5502 | static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5503 | { | 
 | 5504 | 	struct sched_entity *se = &task->se; | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5505 | 	unsigned int rr_interval = 0; | 
 | 5506 |  | 
 | 5507 | 	/* | 
 | 5508 | 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise | 
 | 5509 | 	 * idle runqueue: | 
 | 5510 | 	 */ | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5511 | 	if (rq->cfs.load.weight) | 
 | 5512 | 		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5513 |  | 
 | 5514 | 	return rr_interval; | 
 | 5515 | } | 
 | 5516 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5517 | /* | 
 | 5518 |  * All the scheduling class methods: | 
 | 5519 |  */ | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5520 | const struct sched_class fair_sched_class = { | 
| Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 5521 | 	.next			= &idle_sched_class, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5522 | 	.enqueue_task		= enqueue_task_fair, | 
 | 5523 | 	.dequeue_task		= dequeue_task_fair, | 
 | 5524 | 	.yield_task		= yield_task_fair, | 
| Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 5525 | 	.yield_to_task		= yield_to_task_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5526 |  | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 5527 | 	.check_preempt_curr	= check_preempt_wakeup, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5528 |  | 
 | 5529 | 	.pick_next_task		= pick_next_task_fair, | 
 | 5530 | 	.put_prev_task		= put_prev_task_fair, | 
 | 5531 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 5532 | #ifdef CONFIG_SMP | 
| Li Zefan | 4ce72a2 | 2008-10-22 15:25:26 +0800 | [diff] [blame] | 5533 | 	.select_task_rq		= select_task_rq_fair, | 
 | 5534 |  | 
| Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5535 | 	.rq_online		= rq_online_fair, | 
 | 5536 | 	.rq_offline		= rq_offline_fair, | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 5537 |  | 
 | 5538 | 	.task_waking		= task_waking_fair, | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 5539 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5540 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5541 | 	.set_curr_task          = set_curr_task_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5542 | 	.task_tick		= task_tick_fair, | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5543 | 	.task_fork		= task_fork_fair, | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5544 |  | 
 | 5545 | 	.prio_changed		= prio_changed_fair, | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5546 | 	.switched_from		= switched_from_fair, | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5547 | 	.switched_to		= switched_to_fair, | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5548 |  | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5549 | 	.get_rr_interval	= get_rr_interval_fair, | 
 | 5550 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5551 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 5552 | 	.task_move_group	= task_move_group_fair, | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5553 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5554 | }; | 
 | 5555 |  | 
 | 5556 | #ifdef CONFIG_SCHED_DEBUG | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5557 | void print_cfs_stats(struct seq_file *m, int cpu) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5558 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5559 | 	struct cfs_rq *cfs_rq; | 
 | 5560 |  | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 5561 | 	rcu_read_lock(); | 
| Ingo Molnar | c3b64f1 | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 5562 | 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 5563 | 		print_cfs_rq(m, cpu, cfs_rq); | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 5564 | 	rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5565 | } | 
 | 5566 | #endif | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5567 |  | 
 | 5568 | __init void init_sched_fair_class(void) | 
 | 5569 | { | 
 | 5570 | #ifdef CONFIG_SMP | 
 | 5571 | 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); | 
 | 5572 |  | 
 | 5573 | #ifdef CONFIG_NO_HZ | 
| Diwakar Tundlam | 554ceca | 2012-03-07 14:44:26 -0800 | [diff] [blame] | 5574 | 	nohz.next_balance = jiffies; | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5575 | 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); | 
| Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 5576 | 	cpu_notifier(sched_ilb_notifier, 0); | 
| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5577 | #endif | 
 | 5578 | #endif /* SMP */ | 
 | 5579 |  | 
 | 5580 | } |