/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 5000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 1000000ULL;

/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity.
 */
static unsigned int sched_nr_latency = 5;
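
/*
 * Editorial note (worked example, not part of the original source): with the
 * unscaled defaults above, sysctl_sched_latency = 5000000 (5 ms) and
 * sysctl_sched_min_granularity = 1000000 (1 ms), so the ratio is
 * 5000000 / 1000000 = 5, matching the static initializer.  On writes,
 * sched_nr_latency_handler() further down recomputes the same ratio with
 * DIV_ROUND_UP.
 */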

/*
 * If set, the child runs first after a fork. If set to 0 (the default)
 * then the parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities
	 * that are in the same cfs_rq, i.e. that have a common parent.
	 * Walk up the hierarchy of both tasks until we find ancestors
	 * that are siblings, i.e. children of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

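/*
 * Editorial note (illustration, not part of the original source): if *se sits
 * three levels deep in the group hierarchy and *pse only one level deep, the
 * first loop walks *se up twice so both pointers are at the same depth, and
 * the final loop then walks both up together until they share a cfs_rq, so
 * the preemption decision is made between true siblings.
 */
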
#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
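
/*
 * Editorial note (illustration, not part of the original source): the helpers
 * above compare vruntimes through a signed difference instead of a plain
 * "a < b".  Because vruntime is an ever-increasing u64, the signed delta
 * remains correct even if the counter wraps: with a = 5 and
 * b = ULLONG_MAX - 2, (s64)(a - b) = 8 > 0, so 'a' is correctly treated as
 * the later (larger) vruntime.
 */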

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}
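
/*
 * Editorial note (worked example, not part of the original source): with
 * cfs_rq->min_vruntime = 90, a current entity at vruntime 100 and a leftmost
 * queued entity at 95, the candidate becomes min_vruntime(100, 95) = 95 and
 * the final value max_vruntime(90, 95) = 95.  The outer max_vruntime()
 * ensures the per-cfs_rq minimum only ever moves forward.
 */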

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
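
/*
 * Editorial note (illustration, not part of the original source):
 * calc_delta_mine(delta, NICE_0_LOAD, &se->load) scales delta by
 * NICE_0_LOAD / se->load.weight, so "delta /= w" means dividing by the
 * entity's weight expressed in units of the nice-0 weight.  For example, an
 * entity with a hypothetical weight of 2 * NICE_0_LOAD advances its vruntime
 * by only ~2 ms for 4 ms of wall-clock execution, while a nice-0 entity
 * takes the early-return path and advances at wall-clock rate.
 */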

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
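
/*
 * Editorial note (worked example, not part of the original source): assuming
 * the unscaled defaults (5 ms latency, 1 ms minimum granularity,
 * sched_nr_latency = 5), 1..5 runnable tasks share a 5 ms period, while 8
 * runnable tasks stretch it to 8 * 1 ms = 8 ms so that no slice falls below
 * the minimum granularity.
 */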

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
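
/*
 * Editorial note (worked example, not part of the original source): with two
 * queued entities of weights 1024 and 2048 sharing an assumed 5 ms period,
 * the runqueue load is 3072, so the wall-clock slices come out as roughly
 * 5 ms * 1024/3072 ~ 1.67 ms and 5 ms * 2048/3072 ~ 3.33 ms.  With group
 * scheduling the loop repeats this scaling at every level of the hierarchy.
 */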

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
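
/*
 * Editorial note (worked example, not part of the original source):
 * continuing the example above, the weight-2048 entity's ~3.33 ms wall-clock
 * slice converts to ~3.33 ms * 1024/2048 ~ 1.67 ms of vruntime, the same
 * virtual slice the weight-1024 entity gets, which is exactly what keeps the
 * two entities fair relative to each other.
 */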

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->wait_start);
	}
#endif
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->iowait_sum += delta;
				se->iowait_count++;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little; place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial && sched_feat(FAIR_SLEEPERS)) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Convert the sleeper threshold into virtual time.
		 * SCHED_IDLE is a special sub-class.  We care about
		 * fairness only relative to other SCHED_IDLE tasks,
		 * all of which have the same weight.
		 */
		if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
				 task_of(se)->policy != SCHED_IDLE))
			thresh = calc_delta_fair(thresh, se);

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}
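
/*
 * Editorial note (illustration, not part of the original source): with
 * START_DEBIT a newly forked entity starts one virtual slice past
 * min_vruntime, so it cannot immediately preempt the queue it joins.  A
 * waking sleeper instead receives a credit of up to one latency period
 * (halved under GENTLE_FAIR_SLEEPERS, e.g. 2.5 ms of the default 5 ms), but
 * the final max_vruntime() never lowers an entity's existing vruntime, so a
 * brief sleep cannot be used to gain time.
 */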

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}

static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (!se || cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (!se || cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	for_each_sched_entity(se)
		__clear_buddies(cfs_rq_of(se), se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
	}
}
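
/*
 * Editorial note (worked example, not part of the original source): with two
 * equal-weight tasks and an assumed 5 ms period, sched_slice() gives each an
 * ideal_runtime of 2.5 ms; once the running task has accumulated more than
 * 2.5 ms since it was last scheduled in (tracked via prev_sum_exec_runtime),
 * the periodic tick requests a reschedule and clears the buddy hints.
 */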

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
		return cfs_rq->next;

	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
		return cfs_rq->last;

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}
 | 920 |  | 
 | 921 | /************************************************** | 
 | 922 |  * CFS operations on tasks: | 
 | 923 |  */ | 
 | 924 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 925 | #ifdef CONFIG_SCHED_HRTICK | 
 | 926 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | 
 | 927 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 928 | 	struct sched_entity *se = &p->se; | 
 | 929 | 	struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
 | 930 |  | 
 | 931 | 	WARN_ON(task_rq(p) != rq); | 
 | 932 |  | 
 | 933 | 	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) { | 
 | 934 | 		u64 slice = sched_slice(cfs_rq, se); | 
 | 935 | 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; | 
 | 936 | 		s64 delta = slice - ran; | 
 | 937 |  | 
 | 938 | 		if (delta < 0) { | 
 | 939 | 			if (rq->curr == p) | 
 | 940 | 				resched_task(p); | 
 | 941 | 			return; | 
 | 942 | 		} | 
 | 943 |  | 
 | 944 | 		/* | 
 | 945 | 		 * Don't schedule slices shorter than 10000ns; that just | 
 | 946 | 		 * doesn't make sense. Rely on vruntime for fairness. | 
 | 947 | 		 */ | 
| Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 948 | 		if (rq->curr != p) | 
| Peter Zijlstra | 157124c | 2008-07-28 11:53:11 +0200 | [diff] [blame] | 949 | 			delta = max_t(s64, 10000LL, delta); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 950 |  | 
| Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 951 | 		hrtick_start(rq, delta); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 952 | 	} | 
 | 953 | } | 
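The delta computed above is simply the unused part of the task's slice, floored at 10 us when the task is not the one currently running. A rough standalone sketch of that arithmetic (userspace C, invented values; only the 10000 ns floor and the slice-minus-runtime logic are taken from the function):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t slice = 6000000;	/* pretend sched_slice() returned 6ms    */
	int64_t ran   = 2500000;	/* the task already ran 2.5ms of it      */
	int64_t delta = slice - ran;	/* 3.5ms of the slice remains            */

	if (delta < 0) {
		printf("slice exhausted -> reschedule immediately\n");
		return 0;
	}

	/* mirror the floor applied when the task is not currently running */
	if (delta < 10000)
		delta = 10000;

	printf("arm the hrtick to fire in %lld ns\n", (long long)delta);
	return 0;
}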
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 954 |  | 
 | 955 | /* | 
 | 956 |  * called from enqueue/dequeue and updates the hrtick when the | 
 | 957 |  * current task is from our class and nr_running is low enough | 
 | 958 |  * to matter. | 
 | 959 |  */ | 
 | 960 | static void hrtick_update(struct rq *rq) | 
 | 961 | { | 
 | 962 | 	struct task_struct *curr = rq->curr; | 
 | 963 |  | 
 | 964 | 	if (curr->sched_class != &fair_sched_class) | 
 | 965 | 		return; | 
 | 966 |  | 
 | 967 | 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) | 
 | 968 | 		hrtick_start_fair(rq, curr); | 
 | 969 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 970 | #else /* !CONFIG_SCHED_HRTICK */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 971 | static inline void | 
 | 972 | hrtick_start_fair(struct rq *rq, struct task_struct *p) | 
 | 973 | { | 
 | 974 | } | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 975 |  | 
 | 976 | static inline void hrtick_update(struct rq *rq) | 
 | 977 | { | 
 | 978 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 979 | #endif | 
 | 980 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 981 | /* | 
 | 982 |  * The enqueue_task method is called before nr_running is | 
 | 983 |  * increased. Here we update the fair scheduling stats and | 
 | 984 |  * then put the task into the rbtree: | 
 | 985 |  */ | 
| Ingo Molnar | fd390f6 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 986 | static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 987 | { | 
 | 988 | 	struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 989 | 	struct sched_entity *se = &p->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 990 |  | 
 | 991 | 	for_each_sched_entity(se) { | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 992 | 		if (se->on_rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 993 | 			break; | 
 | 994 | 		cfs_rq = cfs_rq_of(se); | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 995 | 		enqueue_entity(cfs_rq, se, wakeup); | 
| Srivatsa Vaddagiri | b9fa3df | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 996 | 		wakeup = 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 997 | 	} | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 998 |  | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 999 | 	hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1000 | } | 
 | 1001 |  | 
 | 1002 | /* | 
 | 1003 |  * The dequeue_task method is called before nr_running is | 
 | 1004 |  * decreased. We remove the task from the rbtree and | 
 | 1005 |  * update the fair scheduling stats: | 
 | 1006 |  */ | 
| Ingo Molnar | f02231e | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1007 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1008 | { | 
 | 1009 | 	struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1010 | 	struct sched_entity *se = &p->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1011 |  | 
 | 1012 | 	for_each_sched_entity(se) { | 
 | 1013 | 		cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | 525c271 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1014 | 		dequeue_entity(cfs_rq, se, sleep); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1015 | 		/* Don't dequeue parent if it has other entities besides us */ | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1016 | 		if (cfs_rq->load.weight) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1017 | 			break; | 
| Srivatsa Vaddagiri | b9fa3df | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1018 | 		sleep = 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1019 | 	} | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1020 |  | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 1021 | 	hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1022 | } | 
 | 1023 |  | 
 | 1024 | /* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1025 |  * sched_yield() support is very simple - we dequeue and enqueue. | 
 | 1026 |  * | 
 | 1027 |  * If compat_yield is turned on then we requeue to the end of the tree. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1028 |  */ | 
| Dmitry Adamushko | 4530d7a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1029 | static void yield_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1030 | { | 
| Ingo Molnar | db292ca3 | 2007-12-04 17:04:39 +0100 | [diff] [blame] | 1031 | 	struct task_struct *curr = rq->curr; | 
 | 1032 | 	struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
 | 1033 | 	struct sched_entity *rightmost, *se = &curr->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1034 |  | 
 | 1035 | 	/* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1036 | 	 * Are we the only task in the tree? | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1037 | 	 */ | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1038 | 	if (unlikely(cfs_rq->nr_running == 1)) | 
 | 1039 | 		return; | 
 | 1040 |  | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1041 | 	clear_buddies(cfs_rq, se); | 
 | 1042 |  | 
| Ingo Molnar | db292ca3 | 2007-12-04 17:04:39 +0100 | [diff] [blame] | 1043 | 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) { | 
| Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 1044 | 		update_rq_clock(rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1045 | 		/* | 
| Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1046 | 		 * Update run-time statistics of the 'current'. | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1047 | 		 */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1048 | 		update_curr(cfs_rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1049 |  | 
 | 1050 | 		return; | 
 | 1051 | 	} | 
 | 1052 | 	/* | 
 | 1053 | 	 * Find the rightmost entry in the rbtree: | 
 | 1054 | 	 */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1055 | 	rightmost = __pick_last_entity(cfs_rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1056 | 	/* | 
 | 1057 | 	 * Already in the rightmost position? | 
 | 1058 | 	 */ | 
| Fabio Checconi | 54fdc58 | 2009-07-16 12:32:27 +0200 | [diff] [blame] | 1059 | 	if (unlikely(!rightmost || entity_before(rightmost, se))) | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1060 | 		return; | 
 | 1061 |  | 
 | 1062 | 	/* | 
 | 1063 | 	 * Minimally necessary key value to be last in the tree: | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1064 | 	 * Upon rescheduling, sched_class::put_prev_task() will place | 
 | 1065 | 	 * 'current' within the tree based on its new key value. | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1066 | 	 */ | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1067 | 	se->vruntime = rightmost->vruntime + 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1068 | } | 
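When compat_yield is enabled, the function above does not rotate the tree; it simply gives the yielding task a key just past the current rightmost one, so it sorts last. A toy illustration with invented vruntimes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rightmost = 9000000;	/* largest vruntime currently in the tree */
	uint64_t yielding  = 2000000;	/* the task calling sched_yield()         */

	/* same move as the compat_yield path: jump past the rightmost key */
	yielding = rightmost + 1;

	printf("yielding task now sorts after everyone: %llu > %llu\n",
	       (unsigned long long)yielding, (unsigned long long)rightmost);
	return 0;
}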
 | 1069 |  | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1070 | #ifdef CONFIG_SMP | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1071 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1072 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 1073 | /* | 
 | 1074 |  * effective_load() calculates the load change as seen from the root_task_group | 
 | 1075 |  * | 
 | 1076 |  * Adding load to a group doesn't make a group heavier, but can cause movement | 
 | 1077 |  * of group shares between cpus. Assuming the shares were perfectly aligned one | 
 | 1078 |  * can calculate the shift in shares. | 
 | 1079 |  * | 
 | 1080 |  * The problem is that perfectly aligning the shares is rather expensive, hence | 
 | 1081 |  * we try to avoid doing that too often - see update_shares(), which ratelimits | 
 | 1082 |  * this change. | 
 | 1083 |  * | 
 | 1084 |  * We compensate this by not only taking the current delta into account, but | 
 | 1085 |  * also considering the delta between when the shares were last adjusted and | 
 | 1086 |  * now. | 
 | 1087 |  * | 
 | 1088 |  * We still saw a performance dip; some tracing showed us that when balancing | 
 | 1089 |  * between cgroup:/ and cgroup:/foo, the number of affine wakeups increased | 
 | 1090 |  * significantly. Therefore try to bias the error in the direction of failing | 
 | 1091 |  * the affine wakeup. | 
 | 1092 |  * | 
 | 1093 |  */ | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1094 | static long effective_load(struct task_group *tg, int cpu, | 
 | 1095 | 		long wl, long wg) | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1096 | { | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1097 | 	struct sched_entity *se = tg->se[cpu]; | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1098 |  | 
 | 1099 | 	if (!tg->parent) | 
 | 1100 | 		return wl; | 
 | 1101 |  | 
 | 1102 | 	/* | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 1103 | 	 * By not taking the decrease of shares on the other cpu into | 
 | 1104 | 	 * account, our error leans towards reducing the affine wakeups. | 
 | 1105 | 	 */ | 
 | 1106 | 	if (!wl && sched_feat(ASYM_EFF_LOAD)) | 
 | 1107 | 		return wl; | 
 | 1108 |  | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1109 | 	for_each_sched_entity(se) { | 
| Peter Zijlstra | cb5ef42 | 2008-06-27 13:41:32 +0200 | [diff] [blame] | 1110 | 		long S, rw, s, a, b; | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 1111 | 		long more_w; | 
 | 1112 |  | 
 | 1113 | 		/* | 
 | 1114 | 		 * Instead of using this increment, also add the difference | 
 | 1115 | 		 * between when the shares were last updated and now. | 
 | 1116 | 		 */ | 
 | 1117 | 		more_w = se->my_q->load.weight - se->my_q->rq_weight; | 
 | 1118 | 		wl += more_w; | 
 | 1119 | 		wg += more_w; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1120 |  | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1121 | 		S = se->my_q->tg->shares; | 
 | 1122 | 		s = se->my_q->shares; | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1123 | 		rw = se->my_q->rq_weight; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1124 |  | 
| Peter Zijlstra | cb5ef42 | 2008-06-27 13:41:32 +0200 | [diff] [blame] | 1125 | 		a = S*(rw + wl); | 
 | 1126 | 		b = S*rw + s*wg; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1127 |  | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 1128 | 		wl = s*(a-b); | 
 | 1129 |  | 
 | 1130 | 		if (likely(b)) | 
 | 1131 | 			wl /= b; | 
 | 1132 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1133 | 		/* | 
 | 1134 | 		 * Assume the group is already running and will | 
 | 1135 | 		 * thus already be accounted for in the weight. | 
 | 1136 | 		 * | 
 | 1137 | 		 * That is, moving shares between CPUs does not | 
 | 1138 | 		 * alter the group weight. | 
 | 1139 | 		 */ | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1140 | 		wg = 0; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1141 | 	} | 
 | 1142 |  | 
 | 1143 | 	return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1144 | } | 
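A standalone sketch of one iteration of the loop above, using invented numbers (S = tg->shares, rw = the runqueue weight, s = this cpu's current shares, wl/wg = the weight being added), to show how a task's weight is translated into a change of effective group load at one level of the hierarchy:

#include <stdio.h>

int main(void)
{
	long S  = 1024;	/* tg->shares: total shares of the group               */
	long rw = 2048;	/* weight currently queued on this cpu's cfs_rq        */
	long s  = 512;	/* share of the group this cpu currently owns          */
	long wl = 1024;	/* weight of the task we are about to add              */
	long wg = 1024;	/* weight added to the group as a whole                */

	long a = S * (rw + wl);
	long b = S * rw + s * wg;

	wl = s * (a - b);
	if (b)
		wl /= b;

	/* wl is now the resulting change in the group's effective load (~102) */
	printf("effective load change at this level: %ld\n", wl);
	return 0;
}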
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1145 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1146 | #else | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1147 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1148 | static inline unsigned long effective_load(struct task_group *tg, int cpu, | 
 | 1149 | 		unsigned long wl, unsigned long wg) | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1150 | { | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1151 | 	return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1152 | } | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1153 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1154 | #endif | 
 | 1155 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1156 | static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1157 | { | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1158 | 	struct task_struct *curr = current; | 
 | 1159 | 	unsigned long this_load, load; | 
 | 1160 | 	int idx, this_cpu, prev_cpu; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1161 | 	unsigned long tl_per_task; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1162 | 	unsigned int imbalance; | 
 | 1163 | 	struct task_group *tg; | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1164 | 	unsigned long weight; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1165 | 	int balanced; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1166 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1167 | 	idx	  = sd->wake_idx; | 
 | 1168 | 	this_cpu  = smp_processor_id(); | 
 | 1169 | 	prev_cpu  = task_cpu(p); | 
 | 1170 | 	load	  = source_load(prev_cpu, idx); | 
 | 1171 | 	this_load = target_load(this_cpu, idx); | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1172 |  | 
| Peter Zijlstra | e69b0f1 | 2009-09-15 19:38:52 +0200 | [diff] [blame] | 1173 | 	if (sync) { | 
 | 1174 | 		if (sched_feat(SYNC_LESS) && | 
 | 1175 | 		    (curr->se.avg_overlap > sysctl_sched_migration_cost || | 
 | 1176 | 		     p->se.avg_overlap > sysctl_sched_migration_cost)) | 
 | 1177 | 			sync = 0; | 
 | 1178 | 	} else { | 
 | 1179 | 		if (sched_feat(SYNC_MORE) && | 
 | 1180 | 		    (curr->se.avg_overlap < sysctl_sched_migration_cost && | 
 | 1181 | 		     p->se.avg_overlap < sysctl_sched_migration_cost)) | 
 | 1182 | 			sync = 1; | 
 | 1183 | 	} | 
| Peter Zijlstra | fc631c8 | 2009-02-11 14:27:17 +0100 | [diff] [blame] | 1184 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1185 | 	/* | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1186 | 	 * If sync wakeup then subtract the (maximum possible) | 
 | 1187 | 	 * effect of the currently running task from the load | 
 | 1188 | 	 * of the current CPU: | 
 | 1189 | 	 */ | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1190 | 	if (sync) { | 
 | 1191 | 		tg = task_group(current); | 
 | 1192 | 		weight = current->se.load.weight; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1193 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1194 | 		this_load += effective_load(tg, this_cpu, -weight, -weight); | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1195 | 		load += effective_load(tg, prev_cpu, 0, -weight); | 
 | 1196 | 	} | 
 | 1197 |  | 
 | 1198 | 	tg = task_group(p); | 
 | 1199 | 	weight = p->se.load.weight; | 
 | 1200 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1201 | 	imbalance = 100 + (sd->imbalance_pct - 100) / 2; | 
 | 1202 |  | 
| Peter Zijlstra | 71a29aa | 2009-09-07 18:28:05 +0200 | [diff] [blame] | 1203 | 	/* | 
 | 1204 | 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1205 | 	 * due to the sync case above having dropped this_load to 0, we'll | 
 | 1206 | 	 * always have an imbalance, but there's really nothing you can do | 
 | 1207 | 	 * about that, so that's good too. | 
| Peter Zijlstra | 71a29aa | 2009-09-07 18:28:05 +0200 | [diff] [blame] | 1208 | 	 * | 
 | 1209 | 	 * Otherwise check if the two cpus are close enough in load to allow this | 
 | 1210 | 	 * task to be woken on this_cpu. | 
 | 1211 | 	 */ | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1212 | 	balanced = !this_load || | 
 | 1213 | 		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <= | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1214 | 		imbalance*(load + effective_load(tg, prev_cpu, 0, weight)); | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1215 |  | 
 | 1216 | 	/* | 
 | 1217 | 	 * If the currently running task will sleep within | 
 | 1218 | 	 * a reasonable amount of time then attract this newly | 
 | 1219 | 	 * woken task: | 
 | 1220 | 	 */ | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 1221 | 	if (sync && balanced) | 
 | 1222 | 		return 1; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1223 |  | 
 | 1224 | 	schedstat_inc(p, se.nr_wakeups_affine_attempts); | 
 | 1225 | 	tl_per_task = cpu_avg_load_per_task(this_cpu); | 
 | 1226 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1227 | 	if (balanced || | 
 | 1228 | 	    (this_load <= load && | 
 | 1229 | 	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) { | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1230 | 		/* | 
 | 1231 | 		 * This domain has SD_WAKE_AFFINE and | 
 | 1232 | 		 * p is cache cold in this domain, and | 
 | 1233 | 		 * there is no bad imbalance. | 
 | 1234 | 		 */ | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1235 | 		schedstat_inc(sd, ttwu_move_affine); | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1236 | 		schedstat_inc(p, se.nr_wakeups_affine); | 
 | 1237 |  | 
 | 1238 | 		return 1; | 
 | 1239 | 	} | 
 | 1240 | 	return 0; | 
 | 1241 | } | 
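To make the 'balanced' test above concrete, here is a small userspace sketch with made-up loads. It reuses the imbalance formula and the 100*this_side <= imbalance*other_side comparison from the code, but collapses effective_load() to plain weights, which is only exact in the !CONFIG_FAIR_GROUP_SCHED case; the 125 imbalance_pct is an assumed, typical domain default.

#include <stdio.h>

int main(void)
{
	unsigned long this_load = 1024;	/* load on the waking cpu               */
	unsigned long prev_load = 2048;	/* load on the cpu the task last ran on */
	unsigned long weight    = 1024;	/* weight of the waking task            */
	unsigned int  imbalance_pct = 125;			/* assumed default */
	unsigned int  imbalance = 100 + (imbalance_pct - 100) / 2;	/* = 112   */

	/*
	 * Without group scheduling, effective_load(tg, cpu, w, w) is just w and
	 * effective_load(tg, prev_cpu, 0, w) is 0, so the test reduces to:
	 */
	int balanced = !this_load ||
		100 * (this_load + weight) <= imbalance * prev_load;

	printf("imbalance factor %u, balanced %d\n", imbalance, balanced);
	return 0;
}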
 | 1242 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1243 | /* | 
 | 1244 |  * find_idlest_group finds and returns the least busy CPU group within the | 
 | 1245 |  * domain. | 
 | 1246 |  */ | 
 | 1247 | static struct sched_group * | 
| Peter Zijlstra | 78e7ed5 | 2009-09-03 13:16:51 +0200 | [diff] [blame] | 1248 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1249 | 		  int this_cpu, int load_idx) | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1250 | { | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1251 | 	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; | 
 | 1252 | 	unsigned long min_load = ULONG_MAX, this_load = 0; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1253 | 	int imbalance = 100 + (sd->imbalance_pct-100)/2; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1254 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1255 | 	do { | 
 | 1256 | 		unsigned long load, avg_load; | 
 | 1257 | 		int local_group; | 
 | 1258 | 		int i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1259 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1260 | 		/* Skip over this group if it has no CPUs allowed */ | 
 | 1261 | 		if (!cpumask_intersects(sched_group_cpus(group), | 
 | 1262 | 					&p->cpus_allowed)) | 
 | 1263 | 			continue; | 
 | 1264 |  | 
 | 1265 | 		local_group = cpumask_test_cpu(this_cpu, | 
 | 1266 | 					       sched_group_cpus(group)); | 
 | 1267 |  | 
 | 1268 | 		/* Tally up the load of all CPUs in the group */ | 
 | 1269 | 		avg_load = 0; | 
 | 1270 |  | 
 | 1271 | 		for_each_cpu(i, sched_group_cpus(group)) { | 
 | 1272 | 			/* Bias balancing toward cpus of our domain */ | 
 | 1273 | 			if (local_group) | 
 | 1274 | 				load = source_load(i, load_idx); | 
 | 1275 | 			else | 
 | 1276 | 				load = target_load(i, load_idx); | 
 | 1277 |  | 
 | 1278 | 			avg_load += load; | 
 | 1279 | 		} | 
 | 1280 |  | 
 | 1281 | 		/* Adjust by relative CPU power of the group */ | 
 | 1282 | 		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; | 
 | 1283 |  | 
 | 1284 | 		if (local_group) { | 
 | 1285 | 			this_load = avg_load; | 
 | 1286 | 			this = group; | 
 | 1287 | 		} else if (avg_load < min_load) { | 
 | 1288 | 			min_load = avg_load; | 
 | 1289 | 			idlest = group; | 
 | 1290 | 		} | 
 | 1291 | 	} while (group = group->next, group != sd->groups); | 
 | 1292 |  | 
 | 1293 | 	if (!idlest || 100*this_load < imbalance*min_load) | 
 | 1294 | 		return NULL; | 
 | 1295 | 	return idlest; | 
 | 1296 | } | 
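The division by group->cpu_power above normalizes each group's raw load so that groups of different compute capacity can be compared. A small sketch with invented numbers (only the SCHED_LOAD_SCALE unit and the scaling expression are reused):

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL	/* same scale unit the scheduler uses */

int main(void)
{
	/* two hypothetical groups with equal raw load but different power */
	unsigned long raw_load   = 2048;
	unsigned long full_power = 2 * SCHED_LOAD_SCALE;	/* two full cpus */
	unsigned long half_power = SCHED_LOAD_SCALE;		/* weaker group  */

	unsigned long scaled_full = raw_load * SCHED_LOAD_SCALE / full_power;
	unsigned long scaled_half = raw_load * SCHED_LOAD_SCALE / half_power;

	/* the weaker group reports the higher normalized load (2048 > 1024) */
	printf("normalized: %lu vs %lu\n", scaled_full, scaled_half);
	return 0;
}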
 | 1297 |  | 
 | 1298 | /* | 
 | 1299 |  * find_idlest_cpu - find the idlest cpu among the cpus in group. | 
 | 1300 |  */ | 
 | 1301 | static int | 
 | 1302 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | 
 | 1303 | { | 
 | 1304 | 	unsigned long load, min_load = ULONG_MAX; | 
 | 1305 | 	int idlest = -1; | 
 | 1306 | 	int i; | 
 | 1307 |  | 
 | 1308 | 	/* Traverse only the allowed CPUs */ | 
 | 1309 | 	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { | 
 | 1310 | 		load = weighted_cpuload(i); | 
 | 1311 |  | 
 | 1312 | 		if (load < min_load || (load == min_load && i == this_cpu)) { | 
 | 1313 | 			min_load = load; | 
 | 1314 | 			idlest = i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1315 | 		} | 
 | 1316 | 	} | 
 | 1317 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1318 | 	return idlest; | 
 | 1319 | } | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1320 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1321 | /* | 
 | 1322 |  * select_task_rq_fair: balance the given task in the domains that have the | 
 | 1323 |  * 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, SD_BALANCE_FORK | 
 | 1324 |  * and SD_BALANCE_EXEC. | 
 | 1325 |  * | 
 | 1326 |  * Balance, i.e. select the least loaded group. | 
 | 1327 |  * | 
 | 1328 |  * Returns the target CPU number, or the same CPU if no balancing is needed. | 
 | 1329 |  * | 
 | 1330 |  * preempt must be disabled. | 
 | 1331 |  */ | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1332 | static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1333 | { | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1334 | 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1335 | 	int cpu = smp_processor_id(); | 
 | 1336 | 	int prev_cpu = task_cpu(p); | 
 | 1337 | 	int new_cpu = cpu; | 
 | 1338 | 	int want_affine = 0; | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1339 | 	int want_sd = 1; | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1340 | 	int sync = wake_flags & WF_SYNC; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1341 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 1342 | 	if (sd_flag & SD_BALANCE_WAKE) { | 
| Mike Galbraith | 3f04e8c | 2009-09-19 16:52:35 +0200 | [diff] [blame] | 1343 | 		if (sched_feat(AFFINE_WAKEUPS) && | 
 | 1344 | 		    cpumask_test_cpu(cpu, &p->cpus_allowed)) | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1345 | 			want_affine = 1; | 
 | 1346 | 		new_cpu = prev_cpu; | 
 | 1347 | 	} | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1348 |  | 
| Peter Zijlstra | 83f5496 | 2009-09-10 18:18:47 +0200 | [diff] [blame] | 1349 | 	rcu_read_lock(); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1350 | 	for_each_domain(cpu, tmp) { | 
 | 1351 | 		/* | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1352 | 		 * If power savings logic is enabled for a domain, see if we | 
 | 1353 | 		 * are not overloaded; if so, don't balance wider. | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1354 | 		 */ | 
| Peter Zijlstra | 59abf02 | 2009-09-16 08:28:30 +0200 | [diff] [blame] | 1355 | 		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) { | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1356 | 			unsigned long power = 0; | 
 | 1357 | 			unsigned long nr_running = 0; | 
 | 1358 | 			unsigned long capacity; | 
 | 1359 | 			int i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1360 |  | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1361 | 			for_each_cpu(i, sched_domain_span(tmp)) { | 
 | 1362 | 				power += power_of(i); | 
 | 1363 | 				nr_running += cpu_rq(i)->cfs.nr_running; | 
 | 1364 | 			} | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1365 |  | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1366 | 			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE); | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1367 |  | 
| Peter Zijlstra | 59abf02 | 2009-09-16 08:28:30 +0200 | [diff] [blame] | 1368 | 			if (tmp->flags & SD_POWERSAVINGS_BALANCE) | 
 | 1369 | 				nr_running /= 2; | 
 | 1370 |  | 
 | 1371 | 			if (nr_running < capacity) | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1372 | 				want_sd = 0; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1373 | 		} | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1374 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1375 | 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | 
 | 1376 | 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | 
 | 1377 |  | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1378 | 			affine_sd = tmp; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1379 | 			want_affine = 0; | 
 | 1380 | 		} | 
 | 1381 |  | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1382 | 		if (!want_sd && !want_affine) | 
 | 1383 | 			break; | 
 | 1384 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 1385 | 		if (!(tmp->flags & sd_flag)) | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1386 | 			continue; | 
 | 1387 |  | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1388 | 		if (want_sd) | 
 | 1389 | 			sd = tmp; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1390 | 	} | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1391 |  | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1392 | 	if (sched_feat(LB_SHARES_UPDATE)) { | 
 | 1393 | 		/* | 
 | 1394 | 		 * Pick the largest domain to update shares over | 
 | 1395 | 		 */ | 
 | 1396 | 		tmp = sd; | 
 | 1397 | 		if (affine_sd && (!tmp || | 
 | 1398 | 				  cpumask_weight(sched_domain_span(affine_sd)) > | 
 | 1399 | 				  cpumask_weight(sched_domain_span(sd)))) | 
 | 1400 | 			tmp = affine_sd; | 
 | 1401 |  | 
 | 1402 | 		if (tmp) | 
 | 1403 | 			update_shares(tmp); | 
 | 1404 | 	} | 
 | 1405 |  | 
 | 1406 | 	if (affine_sd && wake_affine(affine_sd, p, sync)) { | 
 | 1407 | 		new_cpu = cpu; | 
 | 1408 | 		goto out; | 
 | 1409 | 	} | 
| Peter Zijlstra | 3b64089 | 2009-09-16 13:44:33 +0200 | [diff] [blame] | 1410 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1411 | 	while (sd) { | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1412 | 		int load_idx = sd->forkexec_idx; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1413 | 		struct sched_group *group; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1414 | 		int weight; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1415 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 1416 | 		if (!(sd->flags & sd_flag)) { | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1417 | 			sd = sd->child; | 
 | 1418 | 			continue; | 
 | 1419 | 		} | 
 | 1420 |  | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1421 | 		if (sd_flag & SD_BALANCE_WAKE) | 
 | 1422 | 			load_idx = sd->wake_idx; | 
 | 1423 |  | 
 | 1424 | 		group = find_idlest_group(sd, p, cpu, load_idx); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1425 | 		if (!group) { | 
 | 1426 | 			sd = sd->child; | 
 | 1427 | 			continue; | 
 | 1428 | 		} | 
 | 1429 |  | 
| Peter Zijlstra | d7c33c4 | 2009-09-11 12:45:38 +0200 | [diff] [blame] | 1430 | 		new_cpu = find_idlest_cpu(group, p, cpu); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1431 | 		if (new_cpu == -1 || new_cpu == cpu) { | 
 | 1432 | 			/* Now try balancing at a lower domain level of cpu */ | 
 | 1433 | 			sd = sd->child; | 
 | 1434 | 			continue; | 
 | 1435 | 		} | 
 | 1436 |  | 
 | 1437 | 		/* Now try balancing at a lower domain level of new_cpu */ | 
 | 1438 | 		cpu = new_cpu; | 
 | 1439 | 		weight = cpumask_weight(sched_domain_span(sd)); | 
 | 1440 | 		sd = NULL; | 
 | 1441 | 		for_each_domain(cpu, tmp) { | 
 | 1442 | 			if (weight <= cpumask_weight(sched_domain_span(tmp))) | 
 | 1443 | 				break; | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 1444 | 			if (tmp->flags & sd_flag) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1445 | 				sd = tmp; | 
 | 1446 | 		} | 
 | 1447 | 		/* while loop will break here if sd == NULL */ | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1448 | 	} | 
 | 1449 |  | 
| Ingo Molnar | f482738 | 2008-03-16 21:21:47 +0100 | [diff] [blame] | 1450 | out: | 
| Peter Zijlstra | 83f5496 | 2009-09-10 18:18:47 +0200 | [diff] [blame] | 1451 | 	rcu_read_unlock(); | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1452 | 	return new_cpu; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1453 | } | 
 | 1454 | #endif /* CONFIG_SMP */ | 
 | 1455 |  | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 1456 | /* | 
 | 1457 |  * Adaptive granularity | 
 | 1458 |  * | 
 | 1459 |  * se->avg_wakeup gives the average time a task runs until it does a wakeup, | 
 | 1460 |  * with the limit of wakeup_gran -- when it never does a wakeup. | 
 | 1461 |  * | 
 | 1462 |  * So the smaller avg_wakeup is, the faster we want this task to preempt, | 
 | 1463 |  * but we don't want to treat the preemptee unfairly and therefore allow it | 
 | 1464 |  * to run for at least the amount of time we'd like to run. | 
 | 1465 |  * | 
 | 1466 |  * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one | 
 | 1467 |  * | 
 | 1468 |  * NOTE: we use *nr_running to scale with load; this nicely matches the | 
 | 1469 |  *       degrading latency on load. | 
 | 1470 |  */ | 
 | 1471 | static unsigned long | 
 | 1472 | adaptive_gran(struct sched_entity *curr, struct sched_entity *se) | 
 | 1473 | { | 
 | 1474 | 	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | 
 | 1475 | 	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running; | 
 | 1476 | 	u64 gran = 0; | 
 | 1477 |  | 
 | 1478 | 	if (this_run < expected_wakeup) | 
 | 1479 | 		gran = expected_wakeup - this_run; | 
 | 1480 |  | 
 | 1481 | 	return min_t(s64, gran, sysctl_sched_wakeup_granularity); | 
 | 1482 | } | 
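A quick numeric walk through adaptive_gran() with invented values; the 2x factor, the nr_running scaling and the clamp to the wakeup-granularity sysctl mirror the function above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wakeup_gran_sysctl = 1000000;	/* pretend sysctl value: 1ms      */
	uint64_t avg_wakeup = 200000;		/* wakee usually wakes someone
						 * after running for 0.2ms        */
	uint64_t nr_running = 3;		/* tasks on this cfs_rq           */
	uint64_t this_run   = 500000;		/* current task ran 0.5ms so far  */

	uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;	/* 1.2ms          */
	uint64_t gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;		/* 0.7ms          */

	if (gran > wakeup_gran_sysctl)
		gran = wakeup_gran_sysctl;

	printf("effective wakeup granularity: %llu ns\n", (unsigned long long)gran);
	return 0;
}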
 | 1483 |  | 
 | 1484 | static unsigned long | 
 | 1485 | wakeup_gran(struct sched_entity *curr, struct sched_entity *se) | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1486 | { | 
 | 1487 | 	unsigned long gran = sysctl_sched_wakeup_granularity; | 
 | 1488 |  | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 1489 | 	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN)) | 
 | 1490 | 		gran = adaptive_gran(curr, se); | 
 | 1491 |  | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1492 | 	/* | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 1493 | 	 * Since its curr running now, convert the gran from real-time | 
 | 1494 | 	 * to virtual-time in his units. | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1495 | 	 */ | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 1496 | 	if (sched_feat(ASYM_GRAN)) { | 
 | 1497 | 		/* | 
 | 1498 | 		 * By using 'se' instead of 'curr' we penalize light tasks, so | 
 | 1499 | 		 * they get preempted more easily. That is, if 'se' < 'curr' then | 
 | 1500 | 		 * the resulting gran will be larger, therefore penalizing the | 
 | 1501 | 		 * lighter task; if OTOH 'se' > 'curr' then the resulting gran will | 
 | 1502 | 		 * be smaller, again penalizing the lighter task. | 
 | 1503 | 		 * | 
 | 1504 | 		 * This is especially important for buddies when the leftmost | 
 | 1505 | 		 * task is higher priority than the buddy. | 
 | 1506 | 		 */ | 
 | 1507 | 		if (unlikely(se->load.weight != NICE_0_LOAD)) | 
 | 1508 | 			gran = calc_delta_fair(gran, se); | 
 | 1509 | 	} else { | 
 | 1510 | 		if (unlikely(curr->load.weight != NICE_0_LOAD)) | 
 | 1511 | 			gran = calc_delta_fair(gran, curr); | 
 | 1512 | 	} | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1513 |  | 
 | 1514 | 	return gran; | 
 | 1515 | } | 
 | 1516 |  | 
 | 1517 | /* | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1518 |  * Should 'se' preempt 'curr'? | 
 | 1519 |  * | 
 | 1520 |  *             |s1 | 
 | 1521 |  *        |s2 | 
 | 1522 |  *   |s3 | 
 | 1523 |  *         g | 
 | 1524 |  *      |<--->|c | 
 | 1525 |  * | 
 | 1526 |  *  w(c, s1) = -1 | 
 | 1527 |  *  w(c, s2) =  0 | 
 | 1528 |  *  w(c, s3) =  1 | 
 | 1529 |  * | 
 | 1530 |  */ | 
 | 1531 | static int | 
 | 1532 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | 
 | 1533 | { | 
 | 1534 | 	s64 gran, vdiff = curr->vruntime - se->vruntime; | 
 | 1535 |  | 
 | 1536 | 	if (vdiff <= 0) | 
 | 1537 | 		return -1; | 
 | 1538 |  | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 1539 | 	gran = wakeup_gran(curr, se); | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1540 | 	if (vdiff > gran) | 
 | 1541 | 		return 1; | 
 | 1542 |  | 
 | 1543 | 	return 0; | 
 | 1544 | } | 
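The following standalone sketch exercises the three return values documented in the diagram above (vruntimes are arbitrary; gran would normally come from wakeup_gran()):

#include <stdint.h>
#include <stdio.h>

static int toy_wakeup_preempt(int64_t curr_vruntime, int64_t se_vruntime,
			      int64_t gran)
{
	int64_t vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		return -1;	/* s1: wakee is not ahead, no preemption        */
	if (vdiff > gran)
		return 1;	/* s3: wakee is ahead by more than the gran     */
	return 0;		/* s2: ahead, but within the granularity        */
}

int main(void)
{
	int64_t gran = 1000000;	/* 1ms of virtual time, for illustration */

	printf("%d %d %d\n",
	       toy_wakeup_preempt(5000000, 6000000, gran),	/* -1 */
	       toy_wakeup_preempt(5000000, 4500000, gran),	/*  0 */
	       toy_wakeup_preempt(5000000, 3000000, gran));	/*  1 */
	return 0;
}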
 | 1545 |  | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1546 | static void set_last_buddy(struct sched_entity *se) | 
 | 1547 | { | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1548 | 	if (likely(task_of(se)->policy != SCHED_IDLE)) { | 
 | 1549 | 		for_each_sched_entity(se) | 
 | 1550 | 			cfs_rq_of(se)->last = se; | 
 | 1551 | 	} | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1552 | } | 
 | 1553 |  | 
 | 1554 | static void set_next_buddy(struct sched_entity *se) | 
 | 1555 | { | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1556 | 	if (likely(task_of(se)->policy != SCHED_IDLE)) { | 
 | 1557 | 		for_each_sched_entity(se) | 
 | 1558 | 			cfs_rq_of(se)->next = se; | 
 | 1559 | 	} | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1560 | } | 
 | 1561 |  | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1562 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1563 |  * Preempt the current task with a newly woken task if needed: | 
 | 1564 |  */ | 
| Peter Zijlstra | 5a9b86f | 2009-09-16 13:47:58 +0200 | [diff] [blame] | 1565 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1566 | { | 
 | 1567 | 	struct task_struct *curr = rq->curr; | 
| Srivatsa Vaddagiri | 8651a86 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1568 | 	struct sched_entity *se = &curr->se, *pse = &p->se; | 
| Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 1569 | 	struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
| Peter Zijlstra | 5a9b86f | 2009-09-16 13:47:58 +0200 | [diff] [blame] | 1570 | 	int sync = wake_flags & WF_SYNC; | 
| Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 1571 |  | 
 | 1572 | 	update_curr(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1573 |  | 
 | 1574 | 	if (unlikely(rt_prio(p->prio))) { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1575 | 		resched_task(curr); | 
 | 1576 | 		return; | 
 | 1577 | 	} | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1578 |  | 
| Peter Zijlstra | d95f98d | 2008-11-04 21:25:08 +0100 | [diff] [blame] | 1579 | 	if (unlikely(p->sched_class != &fair_sched_class)) | 
 | 1580 | 		return; | 
 | 1581 |  | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1582 | 	if (unlikely(se == pse)) | 
 | 1583 | 		return; | 
 | 1584 |  | 
| Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 1585 | 	/* | 
 | 1586 | 	 * Only set the backward buddy when the current task is still on the | 
 | 1587 | 	 * rq. This can happen when a wakeup gets interleaved with schedule on | 
 | 1588 | 	 * the ->pre_schedule() or idle_balance() point, either of which can | 
 | 1589 | 	 * drop the rq lock. | 
 | 1590 | 	 * | 
 | 1591 | 	 * Also, during early boot the idle thread is in the fair class; for | 
 | 1592 | 	 * obvious reasons it's a bad idea to schedule back to the idle thread. | 
 | 1593 | 	 */ | 
 | 1594 | 	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle)) | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1595 | 		set_last_buddy(se); | 
| Peter Zijlstra | 5a9b86f | 2009-09-16 13:47:58 +0200 | [diff] [blame] | 1596 | 	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) | 
| Mike Galbraith | 3cb63d5 | 2009-09-11 12:01:17 +0200 | [diff] [blame] | 1597 | 		set_next_buddy(pse); | 
| Peter Zijlstra | 57fdc26 | 2008-09-23 15:33:45 +0200 | [diff] [blame] | 1598 |  | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 1599 | 	/* | 
 | 1600 | 	 * We can come here with TIF_NEED_RESCHED already set from the new task | 
 | 1601 | 	 * wake up path. | 
 | 1602 | 	 */ | 
 | 1603 | 	if (test_tsk_need_resched(curr)) | 
 | 1604 | 		return; | 
 | 1605 |  | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1606 | 	/* | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1607 | 	 * Batch and idle tasks do not preempt (their preemption is driven by | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1608 | 	 * the tick): | 
 | 1609 | 	 */ | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1610 | 	if (unlikely(p->policy != SCHED_NORMAL)) | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1611 | 		return; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1612 |  | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1613 | 	/* Idle tasks are by definition preempted by everybody. */ | 
 | 1614 | 	if (unlikely(curr->policy == SCHED_IDLE)) { | 
 | 1615 | 		resched_task(curr); | 
 | 1616 | 		return; | 
 | 1617 | 	} | 
 | 1618 |  | 
| Peter Zijlstra | e6b1b2c | 2009-09-11 11:59:22 +0200 | [diff] [blame] | 1619 | 	if ((sched_feat(WAKEUP_SYNC) && sync) || | 
 | 1620 | 	    (sched_feat(WAKEUP_OVERLAP) && | 
 | 1621 | 	     (se->avg_overlap < sysctl_sched_migration_cost && | 
 | 1622 | 	      pse->avg_overlap < sysctl_sched_migration_cost))) { | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1623 | 		resched_task(curr); | 
 | 1624 | 		return; | 
 | 1625 | 	} | 
 | 1626 |  | 
| Peter Zijlstra | ad4b78b | 2009-09-16 12:31:31 +0200 | [diff] [blame] | 1627 | 	if (sched_feat(WAKEUP_RUNNING)) { | 
 | 1628 | 		if (pse->avg_running < se->avg_running) { | 
 | 1629 | 			set_next_buddy(pse); | 
 | 1630 | 			resched_task(curr); | 
 | 1631 | 			return; | 
 | 1632 | 		} | 
 | 1633 | 	} | 
 | 1634 |  | 
 | 1635 | 	if (!sched_feat(WAKEUP_PREEMPT)) | 
 | 1636 | 		return; | 
 | 1637 |  | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1638 | 	find_matching_se(&se, &pse); | 
 | 1639 |  | 
| Paul Turner | 002f128 | 2009-04-08 15:29:43 -0700 | [diff] [blame] | 1640 | 	BUG_ON(!pse); | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1641 |  | 
| Paul Turner | 002f128 | 2009-04-08 15:29:43 -0700 | [diff] [blame] | 1642 | 	if (wakeup_preempt_entity(se, pse) == 1) | 
 | 1643 | 		resched_task(curr); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1644 | } | 
 | 1645 |  | 
| Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1646 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1647 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1648 | 	struct task_struct *p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1649 | 	struct cfs_rq *cfs_rq = &rq->cfs; | 
 | 1650 | 	struct sched_entity *se; | 
 | 1651 |  | 
 | 1652 | 	if (unlikely(!cfs_rq->nr_running)) | 
 | 1653 | 		return NULL; | 
 | 1654 |  | 
 | 1655 | 	do { | 
| Ingo Molnar | 9948f4b | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1656 | 		se = pick_next_entity(cfs_rq); | 
| Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1657 | 		/* | 
 | 1658 | 		 * If se was a buddy, clear it so that it will have to earn | 
 | 1659 | 		 * the favour again. | 
| Peter Zijlstra | de69a80 | 2009-09-17 09:01:20 +0200 | [diff] [blame] | 1660 | 		 * | 
 | 1661 | 		 * If se was not a buddy, clear the buddies because neither | 
 | 1662 | 		 * was eligible to run; let them earn it again. | 
 | 1663 | 		 * | 
 | 1664 | 		 * IOW, unconditionally clear the buddies. | 
| Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1665 | 		 */ | 
| Peter Zijlstra | de69a80 | 2009-09-17 09:01:20 +0200 | [diff] [blame] | 1666 | 		__clear_buddies(cfs_rq, NULL); | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1667 | 		set_next_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1668 | 		cfs_rq = group_cfs_rq(se); | 
 | 1669 | 	} while (cfs_rq); | 
 | 1670 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1671 | 	p = task_of(se); | 
 | 1672 | 	hrtick_start_fair(rq, p); | 
 | 1673 |  | 
 | 1674 | 	return p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1675 | } | 
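With CONFIG_FAIR_GROUP_SCHED, the do/while loop above descends one hierarchy level per iteration: if the picked entity represents a group rather than a task, the pick is repeated inside that group's runqueue. A toy model of that descent (the struct and field names are invented; only the control flow mirrors the kernel loop):

#include <stddef.h>
#include <stdio.h>

struct toy_rq;

struct toy_entity {
	const char	*name;
	struct toy_rq	*my_q;	/* non-NULL if this entity is a group */
};

struct toy_rq {
	struct toy_entity *best;	/* stand-in for pick_next_entity() */
};

int main(void)
{
	struct toy_entity task  = { "task:worker", NULL };
	struct toy_rq	  leaf  = { &task };
	struct toy_entity group = { "group:/foo", &leaf };
	struct toy_rq	  root  = { &group };

	struct toy_rq *rq = &root;
	struct toy_entity *se;

	do {
		se = rq->best;		/* pick_next_entity()  */
		printf("picked %s\n", se->name);
		rq = se->my_q;		/* group_cfs_rq(se)    */
	} while (rq);			/* stop at a real task */

	return 0;
}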
 | 1676 |  | 
 | 1677 | /* | 
 | 1678 |  * Account for a descheduled task: | 
 | 1679 |  */ | 
| Ingo Molnar | 31ee529 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1680 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1681 | { | 
 | 1682 | 	struct sched_entity *se = &prev->se; | 
 | 1683 | 	struct cfs_rq *cfs_rq; | 
 | 1684 |  | 
 | 1685 | 	for_each_sched_entity(se) { | 
 | 1686 | 		cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1687 | 		put_prev_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1688 | 	} | 
 | 1689 | } | 
 | 1690 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1691 | #ifdef CONFIG_SMP | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1692 | /************************************************** | 
 | 1693 |  * Fair scheduling class load-balancing methods: | 
 | 1694 |  */ | 
 | 1695 |  | 
 | 1696 | /* | 
 | 1697 |  * Load-balancing iterator. Note: while the runqueue stays locked | 
 | 1698 |  * during the whole iteration, the current task might be | 
 | 1699 |  * dequeued so the iterator has to be dequeue-safe. Here we | 
 | 1700 |  * achieve that by always pre-iterating before returning | 
 | 1701 |  * the current task: | 
 | 1702 |  */ | 
| Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1703 | static struct task_struct * | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1704 | __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1705 | { | 
| Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 1706 | 	struct task_struct *p = NULL; | 
 | 1707 | 	struct sched_entity *se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1708 |  | 
| Mike Galbraith | 77ae651 | 2008-08-11 13:32:02 +0200 | [diff] [blame] | 1709 | 	if (next == &cfs_rq->tasks) | 
 | 1710 | 		return NULL; | 
 | 1711 |  | 
| Bharata B Rao | b87f172 | 2008-09-25 09:53:54 +0530 | [diff] [blame] | 1712 | 	se = list_entry(next, struct sched_entity, group_node); | 
 | 1713 | 	p = task_of(se); | 
 | 1714 | 	cfs_rq->balance_iterator = next->next; | 
| Mike Galbraith | 77ae651 | 2008-08-11 13:32:02 +0200 | [diff] [blame] | 1715 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1716 | 	return p; | 
 | 1717 | } | 
 | 1718 |  | 
 | 1719 | static struct task_struct *load_balance_start_fair(void *arg) | 
 | 1720 | { | 
 | 1721 | 	struct cfs_rq *cfs_rq = arg; | 
 | 1722 |  | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1723 | 	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1724 | } | 
 | 1725 |  | 
 | 1726 | static struct task_struct *load_balance_next_fair(void *arg) | 
 | 1727 | { | 
 | 1728 | 	struct cfs_rq *cfs_rq = arg; | 
 | 1729 |  | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1730 | 	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1731 | } | 
 | 1732 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1733 | static unsigned long | 
 | 1734 | __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1735 | 		unsigned long max_load_move, struct sched_domain *sd, | 
 | 1736 | 		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio, | 
 | 1737 | 		struct cfs_rq *cfs_rq) | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1738 | { | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1739 | 	struct rq_iterator cfs_rq_iterator; | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1740 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1741 | 	cfs_rq_iterator.start = load_balance_start_fair; | 
 | 1742 | 	cfs_rq_iterator.next = load_balance_next_fair; | 
 | 1743 | 	cfs_rq_iterator.arg = cfs_rq; | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1744 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1745 | 	return balance_tasks(this_rq, this_cpu, busiest, | 
 | 1746 | 			max_load_move, sd, idle, all_pinned, | 
 | 1747 | 			this_best_prio, &cfs_rq_iterator); | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1748 | } | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1749 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1750 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1751 | static unsigned long | 
 | 1752 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1753 | 		  unsigned long max_load_move, | 
 | 1754 | 		  struct sched_domain *sd, enum cpu_idle_type idle, | 
 | 1755 | 		  int *all_pinned, int *this_best_prio) | 
 | 1756 | { | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1757 | 	long rem_load_move = max_load_move; | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1758 | 	int busiest_cpu = cpu_of(busiest); | 
 | 1759 | 	struct task_group *tg; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1760 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1761 | 	rcu_read_lock(); | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1762 | 	update_h_load(busiest_cpu); | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1763 |  | 
| Chris Friesen | caea8a0 | 2008-09-22 11:06:09 -0600 | [diff] [blame] | 1764 | 	list_for_each_entry_rcu(tg, &task_groups, list) { | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1765 | 		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu]; | 
| Peter Zijlstra | 42a3ac7 | 2008-06-27 13:41:29 +0200 | [diff] [blame] | 1766 | 		unsigned long busiest_h_load = busiest_cfs_rq->h_load; | 
 | 1767 | 		unsigned long busiest_weight = busiest_cfs_rq->load.weight; | 
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1768 | 		u64 rem_load, moved_load; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1769 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1770 | 		/* | 
 | 1771 | 		 * empty group | 
 | 1772 | 		 */ | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1773 | 		if (!busiest_cfs_rq->task_weight) | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1774 | 			continue; | 
 | 1775 |  | 
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1776 | 		rem_load = (u64)rem_load_move * busiest_weight; | 
 | 1777 | 		rem_load = div_u64(rem_load, busiest_h_load + 1); | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1778 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1779 | 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest, | 
| Srivatsa Vaddagiri | 53fecd8 | 2008-06-27 13:41:20 +0200 | [diff] [blame] | 1780 | 				rem_load, sd, idle, all_pinned, this_best_prio, | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1781 | 				tg->cfs_rq[busiest_cpu]); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1782 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1783 | 		if (!moved_load) | 
 | 1784 | 			continue; | 
 | 1785 |  | 
| Peter Zijlstra | 42a3ac7 | 2008-06-27 13:41:29 +0200 | [diff] [blame] | 1786 | 		moved_load *= busiest_h_load; | 
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1787 | 		moved_load = div_u64(moved_load, busiest_weight + 1); | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1788 |  | 
 | 1789 | 		rem_load_move -= moved_load; | 
 | 1790 | 		if (rem_load_move < 0) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1791 | 			break; | 
 | 1792 | 	} | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1793 | 	rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1794 |  | 
| Peter Williams | 4301065 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1795 | 	return max_load_move - rem_load_move; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1796 | } | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1797 | #else | 
 | 1798 | static unsigned long | 
 | 1799 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1800 | 		  unsigned long max_load_move, | 
 | 1801 | 		  struct sched_domain *sd, enum cpu_idle_type idle, | 
 | 1802 | 		  int *all_pinned, int *this_best_prio) | 
 | 1803 | { | 
 | 1804 | 	return __load_balance_fair(this_rq, this_cpu, busiest, | 
 | 1805 | 			max_load_move, sd, idle, all_pinned, | 
 | 1806 | 			this_best_prio, &busiest->cfs); | 
 | 1807 | } | 
 | 1808 | #endif | 
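A numeric sketch of the per-group rescaling done in the CONFIG_FAIR_GROUP_SCHED version of load_balance_fair() above (all values invented): the remaining amount to move is converted into the group's local weight units before iterating, and the amount actually moved is converted back into hierarchical (h_load) units before being subtracted.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rem_load_move  = 512;	/* still to move, in root weight units  */
	uint64_t busiest_weight = 2048;	/* group's local load.weight on busiest */
	uint64_t busiest_h_load = 256;	/* group's contribution at the root     */

	/* convert the request into the group's local units */
	uint64_t rem_load = rem_load_move * busiest_weight / (busiest_h_load + 1);

	/* pretend the iterator managed to move half of that */
	uint64_t moved_load = rem_load / 2;

	/* convert what was moved back into root units */
	moved_load = moved_load * busiest_h_load / (busiest_weight + 1);

	printf("asked for %llu local units, charged %llu root units\n",
	       (unsigned long long)rem_load, (unsigned long long)moved_load);
	return 0;
}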
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1809 |  | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1810 | static int | 
 | 1811 | move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1812 | 		   struct sched_domain *sd, enum cpu_idle_type idle) | 
 | 1813 | { | 
 | 1814 | 	struct cfs_rq *busy_cfs_rq; | 
 | 1815 | 	struct rq_iterator cfs_rq_iterator; | 
 | 1816 |  | 
 | 1817 | 	cfs_rq_iterator.start = load_balance_start_fair; | 
 | 1818 | 	cfs_rq_iterator.next = load_balance_next_fair; | 
 | 1819 |  | 
 | 1820 | 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { | 
 | 1821 | 		/* | 
 | 1822 | 		 * pass busy_cfs_rq argument into | 
 | 1823 | 		 * load_balance_[start|next]_fair iterators | 
 | 1824 | 		 */ | 
 | 1825 | 		cfs_rq_iterator.arg = busy_cfs_rq; | 
 | 1826 | 		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, | 
 | 1827 | 				       &cfs_rq_iterator)) | 
 | 1828 | 			return 1; | 
 | 1829 | 	} | 
 | 1830 |  | 
 | 1831 | 	return 0; | 
 | 1832 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 1833 | #endif /* CONFIG_SMP */ | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1834 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1835 | /* | 
 | 1836 |  * scheduler tick hitting a task of our scheduling class: | 
 | 1837 |  */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1838 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1839 | { | 
 | 1840 | 	struct cfs_rq *cfs_rq; | 
 | 1841 | 	struct sched_entity *se = &curr->se; | 
 | 1842 |  | 
 | 1843 | 	for_each_sched_entity(se) { | 
 | 1844 | 		cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1845 | 		entity_tick(cfs_rq, se, queued); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1846 | 	} | 
 | 1847 | } | 
 | 1848 |  | 
 | 1849 | /* | 
 | 1850 |  * Share the fairness runtime between parent and child, thus the | 
 | 1851 |  * total amount of pressure on the CPU stays equal - new tasks | 
 | 1852 |  * get a chance to run but frequent forkers are not allowed to | 
 | 1853 |  * monopolize the CPU. Note: the parent runqueue is locked, | 
 | 1854 |  * the child is not running yet. | 
 | 1855 |  */ | 
| Ingo Molnar | ee0827d | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1856 | static void task_new_fair(struct rq *rq, struct task_struct *p) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1857 | { | 
 | 1858 | 	struct cfs_rq *cfs_rq = task_cfs_rq(p); | 
| Ingo Molnar | 429d43b | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1859 | 	struct sched_entity *se = &p->se, *curr = cfs_rq->curr; | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1860 | 	int this_cpu = smp_processor_id(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1861 |  | 
 | 1862 | 	sched_info_queued(p); | 
 | 1863 |  | 
| Ting Yang | 7109c44 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 1864 | 	update_curr(cfs_rq); | 
| Mike Galbraith | b5d9d73 | 2009-09-08 11:12:28 +0200 | [diff] [blame] | 1865 | 	if (curr) | 
 | 1866 | 		se->vruntime = curr->vruntime; | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1867 | 	place_entity(cfs_rq, se, 1); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1868 |  | 
| Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1869 | 	/* 'curr' will be NULL if the child belongs to a different group */ | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1870 | 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && | 
| Fabio Checconi | 54fdc58 | 2009-07-16 12:32:27 +0200 | [diff] [blame] | 1871 | 			curr && entity_before(curr, se)) { | 
| Dmitry Adamushko | 87fefa3 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1872 | 		/* | 
| Ingo Molnar | edcb60a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1873 | 		 * Upon rescheduling, sched_class::put_prev_task() will place | 
 | 1874 | 		 * 'current' within the tree based on its new key value. | 
 | 1875 | 		 */ | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1876 | 		swap(curr->vruntime, se->vruntime); | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 1877 | 		resched_task(rq->curr); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1878 | 	} | 
 | 1879 |  | 
| Srivatsa Vaddagiri | b9dca1e | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 1880 | 	enqueue_task_fair(rq, p, 0); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1881 | } | 
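/*
 * Illustrative sketch, not kernel code: fair entities are picked in order
 * of increasing vruntime, so swapping parent and child vruntimes is all it
 * takes to let the child run first when sched_child_runs_first asks for
 * it.  The values below are made up.
 */
#include <stdio.h>

struct fake_entity {
	const char *name;
	unsigned long long vruntime;
};

/* analogue of entity_before(): the smaller vruntime runs earlier */
static int runs_before(const struct fake_entity *a, const struct fake_entity *b)
{
	return (long long)(a->vruntime - b->vruntime) < 0;
}

int main(void)
{
	struct fake_entity parent = { "parent", 1000 };
	struct fake_entity child  = { "child",  1500 };	/* placed after the parent */

	printf("before swap: %s runs first\n",
	       runs_before(&parent, &child) ? parent.name : child.name);

	/* the swap task_new_fair() performs when the child must run first */
	unsigned long long tmp = parent.vruntime;
	parent.vruntime = child.vruntime;
	child.vruntime = tmp;

	printf("after swap:  %s runs first\n",
	       runs_before(&parent, &child) ? parent.name : child.name);
	return 0;
}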
 | 1882 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1883 | /* | 
 | 1884 |  * Priority of the task has changed. Check to see if we preempt | 
 | 1885 |  * the current task. | 
 | 1886 |  */ | 
 | 1887 | static void prio_changed_fair(struct rq *rq, struct task_struct *p, | 
 | 1888 | 			      int oldprio, int running) | 
 | 1889 | { | 
 | 1890 | 	/* | 
 | 1891 | 	 * Reschedule if we are currently running on this runqueue and | 
 | 1892 | 	 * our priority decreased, or if we are not currently running on | 
 | 1893 | 	 * this runqueue and our priority is higher than the current task's. | 
 | 1894 | 	 */ | 
 | 1895 | 	if (running) { | 
 | 1896 | 		if (p->prio > oldprio) | 
 | 1897 | 			resched_task(rq->curr); | 
 | 1898 | 	} else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1899 | 		check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1900 | } | 
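/*
 * Illustrative sketch, not kernel code: the usual user-visible way to alter
 * a SCHED_OTHER task's priority is its nice value, e.g. via setpriority().
 * Whether a particular path into such a change lands in this hook is not
 * shown by this file, so treat the snippet purely as a demo of the
 * user-space knob.
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	/* drop our own priority by raising the nice value */
	if (setpriority(PRIO_PROCESS, 0, 10) != 0)
		perror("setpriority");

	printf("nice is now %d\n", getpriority(PRIO_PROCESS, 0));
	return 0;
}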
 | 1901 |  | 
 | 1902 | /* | 
 | 1903 |  * We switched to the sched_fair class. | 
 | 1904 |  */ | 
 | 1905 | static void switched_to_fair(struct rq *rq, struct task_struct *p, | 
 | 1906 | 			     int running) | 
 | 1907 | { | 
 | 1908 | 	/* | 
 | 1909 | 	 * We were most likely switched from sched_rt, so | 
 | 1910 | 	 * kick off the schedule if running, otherwise just see | 
 | 1911 | 	 * if we can still preempt the current task. | 
 | 1912 | 	 */ | 
 | 1913 | 	if (running) | 
 | 1914 | 		resched_task(rq->curr); | 
 | 1915 | 	else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1916 | 		check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1917 | } | 
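/*
 * Illustrative sketch, not kernel code: a task enters the fair class when,
 * for example, user space drops it from a realtime policy back to
 * SCHED_OTHER.  The snippet assumes the caller was SCHED_FIFO/SCHED_RR
 * beforehand; run as an ordinary task it simply re-sets SCHED_OTHER.
 */
#include <stdio.h>
#include <sched.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };	/* must be 0 for SCHED_OTHER */

	if (sched_setscheduler(0, SCHED_OTHER, &sp) != 0)	/* pid 0 = ourselves */
		perror("sched_setscheduler");
	else
		printf("policy is now %d (SCHED_OTHER)\n", sched_getscheduler(0));
	return 0;
}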
 | 1918 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1919 | /* Account for a task changing its policy or group. | 
 | 1920 |  * | 
 | 1921 |  * This routine is mostly called to set cfs_rq->curr field when a task | 
 | 1922 |  * migrates between groups/classes. | 
 | 1923 |  */ | 
 | 1924 | static void set_curr_task_fair(struct rq *rq) | 
 | 1925 | { | 
 | 1926 | 	struct sched_entity *se = &rq->curr->se; | 
 | 1927 |  | 
 | 1928 | 	for_each_sched_entity(se) | 
 | 1929 | 		set_next_entity(cfs_rq_of(se), se); | 
 | 1930 | } | 
 | 1931 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 1932 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 1933 | static void moved_group_fair(struct task_struct *p) | 
 | 1934 | { | 
 | 1935 | 	struct cfs_rq *cfs_rq = task_cfs_rq(p); | 
 | 1936 |  | 
 | 1937 | 	update_curr(cfs_rq); | 
 | 1938 | 	place_entity(cfs_rq, &p->se, 1); | 
 | 1939 | } | 
 | 1940 | #endif | 
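/*
 * Illustrative sketch, not kernel code: with CONFIG_FAIR_GROUP_SCHED a task
 * changes group when its pid is written into a cpu cgroup's tasks file.
 * The mount point and group name below are assumptions; they differ from
 * system to system.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *tasks = "/cgroup/cpu/mygroup/tasks";	/* hypothetical path */
	FILE *f = fopen(tasks, "w");

	if (!f) {
		perror(tasks);
		return 1;
	}
	fprintf(f, "%d\n", (int)getpid());	/* move the calling task into the group */
	return fclose(f) ? 1 : 0;
}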
 | 1941 |  | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 1942 | static unsigned int get_rr_interval_fair(struct task_struct *task) | 
 | 1943 | { | 
 | 1944 | 	struct sched_entity *se = &task->se; | 
 | 1945 | 	unsigned long flags; | 
 | 1946 | 	struct rq *rq; | 
 | 1947 | 	unsigned int rr_interval = 0; | 
 | 1948 |  | 
 | 1949 | 	/* | 
 | 1950 | 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise | 
 | 1951 | 	 * idle runqueue: | 
 | 1952 | 	 */ | 
 | 1953 | 	rq = task_rq_lock(task, &flags); | 
 | 1954 | 	if (rq->cfs.load.weight) | 
 | 1955 | 		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); | 
 | 1956 | 	task_rq_unlock(rq, &flags); | 
 | 1957 |  | 
 | 1958 | 	return rr_interval; | 
 | 1959 | } | 
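/*
 * Illustrative sketch, not kernel code: this value is what user space sees
 * through sched_rr_get_interval(), which for SCHED_OTHER tasks reports the
 * task's current fair timeslice rather than a fixed round-robin quantum.
 */
#include <stdio.h>
#include <sched.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) != 0) {	/* pid 0 = calling task */
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("effective timeslice: %ld.%09ld s\n",
	       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}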
 | 1960 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1961 | /* | 
 | 1962 |  * All the scheduling class methods: | 
 | 1963 |  */ | 
| Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1964 | static const struct sched_class fair_sched_class = { | 
 | 1965 | 	.next			= &idle_sched_class, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1966 | 	.enqueue_task		= enqueue_task_fair, | 
 | 1967 | 	.dequeue_task		= dequeue_task_fair, | 
 | 1968 | 	.yield_task		= yield_task_fair, | 
 | 1969 |  | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1970 | 	.check_preempt_curr	= check_preempt_wakeup, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1971 |  | 
 | 1972 | 	.pick_next_task		= pick_next_task_fair, | 
 | 1973 | 	.put_prev_task		= put_prev_task_fair, | 
 | 1974 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1975 | #ifdef CONFIG_SMP | 
| Li Zefan | 4ce72a2 | 2008-10-22 15:25:26 +0800 | [diff] [blame] | 1976 | 	.select_task_rq		= select_task_rq_fair, | 
 | 1977 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1978 | 	.load_balance		= load_balance_fair, | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1979 | 	.move_one_task		= move_one_task_fair, | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1980 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1981 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1982 | 	.set_curr_task          = set_curr_task_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1983 | 	.task_tick		= task_tick_fair, | 
 | 1984 | 	.task_new		= task_new_fair, | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1985 |  | 
 | 1986 | 	.prio_changed		= prio_changed_fair, | 
 | 1987 | 	.switched_to		= switched_to_fair, | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 1988 |  | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 1989 | 	.get_rr_interval	= get_rr_interval_fair, | 
 | 1990 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 1991 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 1992 | 	.moved_group		= moved_group_fair, | 
 | 1993 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1994 | }; | 
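/*
 * Illustrative sketch, not kernel code: sched_class is an ops table, and
 * the ->next pointer chains the classes from highest to lowest priority so
 * the core can ask each one in turn for a runnable task.  A stripped-down
 * version of that dispatch pattern:
 */
#include <stdio.h>
#include <stddef.h>

struct fake_class {
	const char *name;
	const struct fake_class *next;
	const char *(*pick_next)(void);
};

static const char *rt_pick(void)   { return NULL; }		/* no RT task runnable */
static const char *fair_pick(void) { return "some fair task"; }

static const struct fake_class fake_fair = { "fair", NULL,       fair_pick };
static const struct fake_class fake_rt   = { "rt",   &fake_fair, rt_pick };

int main(void)
{
	/* walk the chain until one class has something to run */
	for (const struct fake_class *c = &fake_rt; c; c = c->next) {
		const char *task = c->pick_next();

		if (task) {
			printf("%s class picked: %s\n", c->name, task);
			break;
		}
	}
	return 0;
}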
 | 1995 |  | 
 | 1996 | #ifdef CONFIG_SCHED_DEBUG | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1997 | static void print_cfs_stats(struct seq_file *m, int cpu) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1998 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1999 | 	struct cfs_rq *cfs_rq; | 
 | 2000 |  | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 2001 | 	rcu_read_lock(); | 
| Ingo Molnar | c3b64f1 | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 2002 | 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 2003 | 		print_cfs_rq(m, cpu, cfs_rq); | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 2004 | 	rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2005 | } | 
 | 2006 | #endif |
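/*
 * Illustrative sketch, not kernel code: on kernels built with
 * CONFIG_SCHED_DEBUG, the per-cfs_rq statistics printed above end up in
 * /proc/sched_debug, which can simply be read like any other proc file.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sched_debug", "r");

	if (!f) {
		perror("/proc/sched_debug");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}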