/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 4000000ULL;

/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 5;
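
/*
 * Worked example with the initial values above: 20ms / 4ms = 5, i.e. up
 * to five runnable tasks fit into one latency period before the period
 * itself has to be stretched. sched_nr_latency_handler() below keeps
 * this ratio up to date when the two sysctls are changed at runtime.
 */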
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 49 |  | 
 | 50 | /* | 
| Ingo Molnar | 2bd8e6d | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 51 |  * After fork, child runs first. (default) If set to 0 then | 
 | 52 |  * parent will (try to) run first. | 
 | 53 |  */ | 
 | 54 | const_debug unsigned int sysctl_sched_child_runs_first = 1; | 
| Peter Zijlstra | 2180508 | 2007-08-25 18:41:53 +0200 | [diff] [blame] | 55 |  | 
 | 56 | /* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 57 |  * sys_sched_yield() compat mode | 
 | 58 |  * | 
 | 59 |  * This option switches the agressive yield implementation of the | 
 | 60 |  * old scheduler back on. | 
 | 61 |  */ | 
 | 62 | unsigned int __read_mostly sysctl_sched_compat_yield; | 
 | 63 |  | 
 | 64 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 65 |  * SCHED_OTHER wake-up granularity. | 
| Peter Zijlstra | 103638d9 | 2008-06-27 13:41:16 +0200 | [diff] [blame] | 66 |  * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 67 |  * | 
 | 68 |  * This option delays the preemption effects of decoupled workloads | 
 | 69 |  * and reduces their over-scheduling. Synchronous workloads will still | 
 | 70 |  * have immediate wakeup/sleep latencies. | 
 | 71 |  */ | 
| Peter Zijlstra | 103638d9 | 2008-06-27 13:41:16 +0200 | [diff] [blame] | 72 | unsigned int sysctl_sched_wakeup_granularity = 5000000UL; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 73 |  | 
| Ingo Molnar | da84d96 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 74 | const_debug unsigned int sysctl_sched_migration_cost = 500000UL; | 
 | 75 |  | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 76 | static const struct sched_class fair_sched_class; | 
 | 77 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 78 | /************************************************************** | 
 | 79 |  * CFS operations on generic schedulable entities: | 
 | 80 |  */ | 
 | 81 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 82 | static inline struct task_struct *task_of(struct sched_entity *se) | 
 | 83 | { | 
 | 84 | 	return container_of(se, struct task_struct, se); | 
 | 85 | } | 
 | 86 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 87 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 88 |  | 
 | 89 | /* cpu runqueue to which this cfs_rq is attached */ | 
 | 90 | static inline struct rq *rq_of(struct cfs_rq *cfs_rq) | 
 | 91 | { | 
 | 92 | 	return cfs_rq->rq; | 
 | 93 | } | 
 | 94 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 95 | /* An entity is a task if it doesn't "own" a runqueue */ | 
 | 96 | #define entity_is_task(se)	(!se->my_q) | 
 | 97 |  | 
| Peter Zijlstra | b758149 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 98 | /* Walk up scheduling entities hierarchy */ | 
 | 99 | #define for_each_sched_entity(se) \ | 
 | 100 | 		for (; se; se = se->parent) | 
 | 101 |  | 
 | 102 | static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) | 
 | 103 | { | 
 | 104 | 	return p->se.cfs_rq; | 
 | 105 | } | 
 | 106 |  | 
 | 107 | /* runqueue on which this entity is (to be) queued */ | 
 | 108 | static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) | 
 | 109 | { | 
 | 110 | 	return se->cfs_rq; | 
 | 111 | } | 
 | 112 |  | 
 | 113 | /* runqueue "owned" by this group */ | 
 | 114 | static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) | 
 | 115 | { | 
 | 116 | 	return grp->my_q; | 
 | 117 | } | 
 | 118 |  | 
 | 119 | /* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on | 
 | 120 |  * another cpu ('this_cpu') | 
 | 121 |  */ | 
 | 122 | static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu) | 
 | 123 | { | 
 | 124 | 	return cfs_rq->tg->cfs_rq[this_cpu]; | 
 | 125 | } | 
 | 126 |  | 
 | 127 | /* Iterate thr' all leaf cfs_rq's on a runqueue */ | 
 | 128 | #define for_each_leaf_cfs_rq(rq, cfs_rq) \ | 
 | 129 | 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) | 
 | 130 |  | 
 | 131 | /* Do the two (enqueued) entities belong to the same group ? */ | 
 | 132 | static inline int | 
 | 133 | is_same_group(struct sched_entity *se, struct sched_entity *pse) | 
 | 134 | { | 
 | 135 | 	if (se->cfs_rq == pse->cfs_rq) | 
 | 136 | 		return 1; | 
 | 137 |  | 
 | 138 | 	return 0; | 
 | 139 | } | 
 | 140 |  | 
 | 141 | static inline struct sched_entity *parent_entity(struct sched_entity *se) | 
 | 142 | { | 
 | 143 | 	return se->parent; | 
 | 144 | } | 
 | 145 |  | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 146 | /* return depth at which a sched entity is present in the hierarchy */ | 
 | 147 | static inline int depth_se(struct sched_entity *se) | 
 | 148 | { | 
 | 149 | 	int depth = 0; | 
 | 150 |  | 
 | 151 | 	for_each_sched_entity(se) | 
 | 152 | 		depth++; | 
 | 153 |  | 
 | 154 | 	return depth; | 
 | 155 | } | 
 | 156 |  | 
 | 157 | static void | 
 | 158 | find_matching_se(struct sched_entity **se, struct sched_entity **pse) | 
 | 159 | { | 
 | 160 | 	int se_depth, pse_depth; | 
 | 161 |  | 
 | 162 | 	/* | 
 | 163 | 	 * preemption test can be made between sibling entities who are in the | 
 | 164 | 	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of | 
 | 165 | 	 * both tasks until we find their ancestors who are siblings of common | 
 | 166 | 	 * parent. | 
 | 167 | 	 */ | 
 | 168 |  | 
 | 169 | 	/* First walk up until both entities are at same depth */ | 
 | 170 | 	se_depth = depth_se(*se); | 
 | 171 | 	pse_depth = depth_se(*pse); | 
 | 172 |  | 
 | 173 | 	while (se_depth > pse_depth) { | 
 | 174 | 		se_depth--; | 
 | 175 | 		*se = parent_entity(*se); | 
 | 176 | 	} | 
 | 177 |  | 
 | 178 | 	while (pse_depth > se_depth) { | 
 | 179 | 		pse_depth--; | 
 | 180 | 		*pse = parent_entity(*pse); | 
 | 181 | 	} | 
 | 182 |  | 
 | 183 | 	while (!is_same_group(*se, *pse)) { | 
 | 184 | 		*se = parent_entity(*se); | 
 | 185 | 		*pse = parent_entity(*pse); | 
 | 186 | 	} | 
 | 187 | } | 
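
/*
 * Illustration with hypothetical task groups: if *se runs in group /A/B
 * and *pse in group /C, the loops above replace them with the group
 * entities for /A and /C, which sit in the same (root) cfs_rq and can
 * therefore be compared directly.
 */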

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
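
/*
 * Note: both helpers compare through a signed 64-bit delta rather than
 * comparing the u64 values directly, so the comparison stays correct
 * even once vruntime eventually wraps around the 64-bit range.
 */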

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
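
/*
 * Example with the default values shown above (20ms latency, 4ms minimum
 * granularity, sched_nr_latency = 5): up to 5 runnable tasks share a
 * 20ms period; with 8 runnable tasks the period is stretched to
 * 8 * 4ms = 32ms so that no slice drops below the minimum granularity.
 */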

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			struct load_weight lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
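
/*
 * Example, for a flat (non-grouped) runqueue with the 20ms default
 * period: two runnable tasks of equal weight get a 10ms wall-time slice
 * each; if one task has twice the weight of the other, the split is
 * roughly 13.3ms versus 6.7ms.
 */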

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		account_scheduler_latency(tsk, delta >> 10, 1);
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {

			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
		account_scheduler_latency(tsk, delta >> 10, 0);
	}
#endif
}
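
/*
 * Note: the deltas above are in nanoseconds; the ">> 10" shift passes a
 * roughly microseconds-scale value to the latencytop accounting, while
 * ">> 20" yields the milliseconds-range value used for sleep profiling,
 * as described in the comment above.
 */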

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little; place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	if (!initial) {
		/* sleeps up to a single latency don't count. */
		if (sched_feat(NEW_FAIR_SLEEPERS)) {
			unsigned long thresh = sysctl_sched_latency;

			/*
			 * Convert the sleeper threshold into virtual time.
			 * SCHED_IDLE is a special sub-class.  We care about
			 * fairness only relative to other SCHED_IDLE tasks,
			 * all of which have the same weight.
			 */
			if (sched_feat(NORMALIZED_SLEEPER) &&
					task_of(se)->policy != SCHED_IDLE)
				thresh = calc_delta_fair(thresh, se);

			vruntime -= thresh;
		}

		/* ensure we never gain time by being placed backwards. */
		vruntime = max_vruntime(se->vruntime, vruntime);
	}

	se->vruntime = vruntime;
}
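
/*
 * In effect: with START_DEBIT a newly forked task starts one vruntime
 * slice behind min_vruntime, so it cannot immediately preempt everyone,
 * while a waking sleeper may be placed at most one latency period before
 * min_vruntime, which bounds the wakeup bonus it can accumulate.
 */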

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
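
/*
 * In other words: once the running entity has consumed its wall-time
 * slice as computed by sched_slice(), it is flagged for rescheduling so
 * that another entity can be picked at the next scheduling point.
 */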

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
		return cfs_rq->next;

	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
		return cfs_rq->last;

	return se;
}
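
/*
 * The ->next and ->last buddies allow a wakee or the previously running
 * task to be picked ahead of the leftmost entity, but only while doing
 * so would not be too unfair to the leftmost entity, i.e. only while
 * wakeup_preempt_entity() returns less than 1 for the buddy.
 */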

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}

	hrtick_update(rq);
}
 | 948 |  | 
 | 949 | /* | 
 | 950 |  * The dequeue_task method is called before nr_running is | 
 | 951 |  * decreased. We remove the task from the rbtree and | 
 | 952 |  * update the fair scheduling stats: | 
 | 953 |  */ | 
| Ingo Molnar | f02231e | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 954 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 955 | { | 
 | 956 | 	struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 957 | 	struct sched_entity *se = &p->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 958 |  | 
 | 959 | 	for_each_sched_entity(se) { | 
 | 960 | 		cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | 525c271 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 961 | 		dequeue_entity(cfs_rq, se, sleep); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 962 | 		/* Don't dequeue parent if it has other entities besides us */ | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 963 | 		if (cfs_rq->load.weight) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 964 | 			break; | 
| Srivatsa Vaddagiri | b9fa3df | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 965 | 		sleep = 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 966 | 	} | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 967 |  | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 968 | 	hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 969 | } | 
 | 970 |  | 
 | 971 | /* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 972 |  * sched_yield() support is very simple - we dequeue and enqueue. | 
 | 973 |  * | 
 | 974 |  * If compat_yield is turned on then we requeue to the end of the tree. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 975 |  */ | 
| Dmitry Adamushko | 4530d7a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 976 | static void yield_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 977 | { | 
| Ingo Molnar | db292ca | 2007-12-04 17:04:39 +0100 | [diff] [blame] | 978 | 	struct task_struct *curr = rq->curr; | 
 | 979 | 	struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
 | 980 | 	struct sched_entity *rightmost, *se = &curr->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 981 |  | 
 | 982 | 	/* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 983 | 	 * Are we the only task in the tree? | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 984 | 	 */ | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 985 | 	if (unlikely(cfs_rq->nr_running == 1)) | 
 | 986 | 		return; | 
 | 987 |  | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 988 | 	clear_buddies(cfs_rq, se); | 
 | 989 |  | 
| Ingo Molnar | db292ca | 2007-12-04 17:04:39 +0100 | [diff] [blame] | 990 | 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) { | 
| Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 991 | 		update_rq_clock(rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 992 | 		/* | 
| Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 993 | 		 * Update run-time statistics of the 'current'. | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 994 | 		 */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 995 | 		update_curr(cfs_rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 996 |  | 
 | 997 | 		return; | 
 | 998 | 	} | 
 | 999 | 	/* | 
 | 1000 | 	 * Find the rightmost entry in the rbtree: | 
 | 1001 | 	 */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1002 | 	rightmost = __pick_last_entity(cfs_rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1003 | 	/* | 
 | 1004 | 	 * Already in the rightmost position? | 
 | 1005 | 	 */ | 
| Peter Zijlstra | 79b3fef | 2008-02-18 13:39:37 +0100 | [diff] [blame] | 1006 | 	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime)) | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1007 | 		return; | 
 | 1008 |  | 
 | 1009 | 	/* | 
 | 1010 | 	 * Minimally necessary key value to be last in the tree: | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1011 | 	 * Upon rescheduling, sched_class::put_prev_task() will place | 
 | 1012 | 	 * 'current' within the tree based on its new key value. | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 1013 | 	 */ | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1014 | 	se->vruntime = rightmost->vruntime + 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1015 | } | 
 | 1016 |  | 
 | 1017 | /* | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1018 |  * wake_idle() will wake a task on an idle cpu if task->cpu is | 
 | 1019 |  * not idle and an idle cpu is available.  The span of cpus to | 
 | 1020 |  * search starts with cpus closest then further out as needed, | 
 | 1021 |  * so we always favor a closer, idle cpu. | 
| Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 1022 |  * Domains may include CPUs that are not usable for migration, | 
| Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1023 |  * hence we need to mask them out (cpu_active_mask) | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1024 |  * | 
 | 1025 |  * Returns the CPU we should wake onto. | 
 | 1026 |  */ | 
 | 1027 | #if defined(ARCH_HAS_SCHED_WAKE_IDLE) | 
 | 1028 | static int wake_idle(int cpu, struct task_struct *p) | 
 | 1029 | { | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1030 | 	struct sched_domain *sd; | 
 | 1031 | 	int i; | 
| Vaidyanathan Srinivasan | 7eb52df | 2008-12-18 23:26:29 +0530 | [diff] [blame] | 1032 | 	unsigned int chosen_wakeup_cpu; | 
 | 1033 | 	int this_cpu; | 
 | 1034 |  | 
 | 1035 | 	/* | 
 | 1036 | 	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu | 
 | 1037 | 	 * are idle and this is not a kernel thread and this task's affinity | 
 | 1038 | 	 * allows it to be moved to the preferred cpu, then just move! | 
 | 1039 | 	 */ | 
 | 1040 |  | 
 | 1041 | 	this_cpu = smp_processor_id(); | 
 | 1042 | 	chosen_wakeup_cpu = | 
 | 1043 | 		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu; | 
 | 1044 |  | 
 | 1045 | 	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP && | 
 | 1046 | 		idle_cpu(cpu) && idle_cpu(this_cpu) && | 
 | 1047 | 		p->mm && !(p->flags & PF_KTHREAD) && | 
 | 1048 | 		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed)) | 
 | 1049 | 		return chosen_wakeup_cpu; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1050 |  | 
 | 1051 | 	/* | 
 | 1052 | 	 * If it is idle, then it is the best cpu to run this task. | 
 | 1053 | 	 * | 
 | 1054 | 	 * This cpu is also the best if it has more than one task already. | 
 | 1055 | 	 * Siblings must also be busy (in most cases) as they didn't already | 
 | 1056 | 	 * pick up the extra load from this cpu, hence we need not check | 
 | 1057 | 	 * sibling runqueue info. This avoids the checks and cache miss | 
 | 1058 | 	 * penalties associated with that. | 
 | 1059 | 	 */ | 
| Gregory Haskins | 104f645 | 2008-04-28 12:40:01 -0400 | [diff] [blame] | 1060 | 	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1) | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1061 | 		return cpu; | 
 | 1062 |  | 
 | 1063 | 	for_each_domain(cpu, sd) { | 
| Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1064 | 		if ((sd->flags & SD_WAKE_IDLE) | 
 | 1065 | 		    || ((sd->flags & SD_WAKE_IDLE_FAR) | 
 | 1066 | 			&& !task_hot(p, task_rq(p)->clock, sd))) { | 
| Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 1067 | 			for_each_cpu_and(i, sched_domain_span(sd), | 
 | 1068 | 					 &p->cpus_allowed) { | 
 | 1069 | 				if (cpu_active(i) && idle_cpu(i)) { | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1070 | 					if (i != task_cpu(p)) { | 
 | 1071 | 						schedstat_inc(p, | 
 | 1072 | 						       se.nr_wakeups_idle); | 
 | 1073 | 					} | 
 | 1074 | 					return i; | 
 | 1075 | 				} | 
 | 1076 | 			} | 
 | 1077 | 		} else { | 
 | 1078 | 			break; | 
 | 1079 | 		} | 
 | 1080 | 	} | 
 | 1081 | 	return cpu; | 
 | 1082 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 1083 | #else /* !ARCH_HAS_SCHED_WAKE_IDLE */ | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1084 | static inline int wake_idle(int cpu, struct task_struct *p) | 
 | 1085 | { | 
 | 1086 | 	return cpu; | 
 | 1087 | } | 
 | 1088 | #endif | 
 | 1089 |  | 
 | 1090 | #ifdef CONFIG_SMP | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1091 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1092 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 1093 | /* | 
 | 1094 |  * effective_load() calculates the load change as seen from the root_task_group | 
 | 1095 |  * | 
 | 1096 |  * Adding load to a group doesn't make a group heavier, but can cause movement | 
 | 1097 |  * of group shares between cpus. Assuming the shares were perfectly aligned one | 
 | 1098 |  * can calculate the shift in shares. | 
 | 1099 |  * | 
 | 1100 |  * The problem is that perfectly aligning the shares is rather expensive, hence | 
 | 1101 |  * we try to avoid doing that too often - see update_shares(), which ratelimits | 
 | 1102 |  * this change. | 
 | 1103 |  * | 
 | 1104 |  * We compensate this by not only taking the current delta into account, but | 
 | 1105 |  * also considering the delta between when the shares were last adjusted and | 
 | 1106 |  * now. | 
 | 1107 |  * | 
 | 1108 |  * We still saw a performance dip; some tracing showed us that when balancing | 
 | 1109 |  * between cgroup:/ and cgroup:/foo the number of affine wakeups increased | 
 | 1110 |  * significantly. Therefore try to bias the error in the direction of failing | 
 | 1111 |  * the affine wakeup. | 
 | 1112 |  * | 
 | 1113 |  */ | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1114 | static long effective_load(struct task_group *tg, int cpu, | 
 | 1115 | 		long wl, long wg) | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1116 | { | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1117 | 	struct sched_entity *se = tg->se[cpu]; | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1118 |  | 
 | 1119 | 	if (!tg->parent) | 
 | 1120 | 		return wl; | 
 | 1121 |  | 
 | 1122 | 	/* | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 1123 | 	 * By not taking the decrease of shares on the other cpu into | 
 | 1124 | 	 * account our error leans towards reducing the affine wakeups. | 
 | 1125 | 	 */ | 
 | 1126 | 	if (!wl && sched_feat(ASYM_EFF_LOAD)) | 
 | 1127 | 		return wl; | 
 | 1128 |  | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1129 | 	for_each_sched_entity(se) { | 
| Peter Zijlstra | cb5ef42 | 2008-06-27 13:41:32 +0200 | [diff] [blame] | 1130 | 		long S, rw, s, a, b; | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 1131 | 		long more_w; | 
 | 1132 |  | 
 | 1133 | 		/* | 
 | 1134 | 		 * In addition to the requested change, also add the weight | 
 | 1135 | 		 * difference accumulated since the shares were last updated. | 
 | 1136 | 		 */ | 
 | 1137 | 		more_w = se->my_q->load.weight - se->my_q->rq_weight; | 
 | 1138 | 		wl += more_w; | 
 | 1139 | 		wg += more_w; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1140 |  | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1141 | 		S = se->my_q->tg->shares; | 
 | 1142 | 		s = se->my_q->shares; | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1143 | 		rw = se->my_q->rq_weight; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1144 |  | 
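 |  | 		/* | 
 |  | 		 * A sketch of the algebra (assuming the shares were perfectly | 
 |  | 		 * aligned before the change, i.e. s = S*rw/RW, so RW = S*rw/s): | 
 |  | 		 * the new share would be | 
 |  | 		 * | 
 |  | 		 *   s' = S*(rw + wl) / (RW + wg) = s*S*(rw + wl) / (S*rw + s*wg) | 
 |  | 		 * | 
 |  | 		 * hence the load delta seen at this level is | 
 |  | 		 * | 
 |  | 		 *   s' - s = s*(a - b)/b,  with a = S*(rw + wl), b = S*rw + s*wg | 
 |  | 		 */ | 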
| Peter Zijlstra | cb5ef42 | 2008-06-27 13:41:32 +0200 | [diff] [blame] | 1145 | 		a = S*(rw + wl); | 
 | 1146 | 		b = S*rw + s*wg; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1147 |  | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 1148 | 		wl = s*(a-b); | 
 | 1149 |  | 
 | 1150 | 		if (likely(b)) | 
 | 1151 | 			wl /= b; | 
 | 1152 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1153 | 		/* | 
 | 1154 | 		 * Assume the group is already running and will | 
 | 1155 | 		 * thus already be accounted for in the weight. | 
 | 1156 | 		 * | 
 | 1157 | 		 * That is, moving shares between CPUs does not | 
 | 1158 | 		 * alter the group weight. | 
 | 1159 | 		 */ | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1160 | 		wg = 0; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1161 | 	} | 
 | 1162 |  | 
 | 1163 | 	return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1164 | } | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1165 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1166 | #else | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1167 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1168 | static inline unsigned long effective_load(struct task_group *tg, int cpu, | 
 | 1169 | 		unsigned long wl, unsigned long wg) | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1170 | { | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1171 | 	return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1172 | } | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1173 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1174 | #endif | 
 | 1175 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1176 | static int | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1177 | wake_affine(struct sched_domain *this_sd, struct rq *this_rq, | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1178 | 	    struct task_struct *p, int prev_cpu, int this_cpu, int sync, | 
 | 1179 | 	    int idx, unsigned long load, unsigned long this_load, | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1180 | 	    unsigned int imbalance) | 
 | 1181 | { | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1182 | 	struct task_struct *curr = this_rq->curr; | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1183 | 	struct task_group *tg; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1184 | 	unsigned long tl = this_load; | 
 | 1185 | 	unsigned long tl_per_task; | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1186 | 	unsigned long weight; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1187 | 	int balanced; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1188 |  | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1189 | 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1190 | 		return 0; | 
 | 1191 |  | 
| Mike Galbraith | 0d13033 | 2008-10-24 11:06:14 +0200 | [diff] [blame] | 1192 | 	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost || | 
 | 1193 | 			p->se.avg_overlap > sysctl_sched_migration_cost)) | 
 | 1194 | 		sync = 0; | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 1195 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1196 | 	/* | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1197 | 	 * If sync wakeup then subtract the (maximum possible) | 
 | 1198 | 	 * effect of the currently running task from the load | 
 | 1199 | 	 * of the current CPU: | 
 | 1200 | 	 */ | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1201 | 	if (sync) { | 
 | 1202 | 		tg = task_group(current); | 
 | 1203 | 		weight = current->se.load.weight; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1204 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1205 | 		tl += effective_load(tg, this_cpu, -weight, -weight); | 
 | 1206 | 		load += effective_load(tg, prev_cpu, 0, -weight); | 
 | 1207 | 	} | 
 | 1208 |  | 
 | 1209 | 	tg = task_group(p); | 
 | 1210 | 	weight = p->se.load.weight; | 
 | 1211 |  | 
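 |  | 	/* | 
 |  | 	 * Roughly: treat the wakeup as 'balanced' if this_cpu's load with p's | 
 |  | 	 * (group-adjusted) weight added stays within half the imbalance_pct | 
 |  | 	 * margin of prev_cpu's load; the imbalance factor comes from the | 
 |  | 	 * caller, select_task_rq_fair(). | 
 |  | 	 */ | 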
 | 1212 | 	balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <= | 
 | 1213 | 		imbalance*(load + effective_load(tg, prev_cpu, 0, weight)); | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1214 |  | 
 | 1215 | 	/* | 
 | 1216 | 	 * If the currently running task will sleep within | 
 | 1217 | 	 * a reasonable amount of time then attract this newly | 
 | 1218 | 	 * woken task: | 
 | 1219 | 	 */ | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 1220 | 	if (sync && balanced) | 
 | 1221 | 		return 1; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1222 |  | 
 | 1223 | 	schedstat_inc(p, se.nr_wakeups_affine_attempts); | 
 | 1224 | 	tl_per_task = cpu_avg_load_per_task(this_cpu); | 
 | 1225 |  | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1226 | 	if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <= | 
 | 1227 | 			tl_per_task)) { | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1228 | 		/* | 
 | 1229 | 		 * This domain has SD_WAKE_AFFINE and | 
 | 1230 | 		 * p is cache cold in this domain, and | 
 | 1231 | 		 * there is no bad imbalance. | 
 | 1232 | 		 */ | 
 | 1233 | 		schedstat_inc(this_sd, ttwu_move_affine); | 
 | 1234 | 		schedstat_inc(p, se.nr_wakeups_affine); | 
 | 1235 |  | 
 | 1236 | 		return 1; | 
 | 1237 | 	} | 
 | 1238 | 	return 0; | 
 | 1239 | } | 
 | 1240 |  | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1241 | static int select_task_rq_fair(struct task_struct *p, int sync) | 
 | 1242 | { | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1243 | 	struct sched_domain *sd, *this_sd = NULL; | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1244 | 	int prev_cpu, this_cpu, new_cpu; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1245 | 	unsigned long load, this_load; | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1246 | 	struct rq *this_rq; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1247 | 	unsigned int imbalance; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1248 | 	int idx; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1249 |  | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1250 | 	prev_cpu	= task_cpu(p); | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1251 | 	this_cpu	= smp_processor_id(); | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1252 | 	this_rq		= cpu_rq(this_cpu); | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1253 | 	new_cpu		= prev_cpu; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1254 |  | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1255 | 	if (prev_cpu == this_cpu) | 
 | 1256 | 		goto out; | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1257 | 	/* | 
 | 1258 | 	 * 'this_sd' is the first domain that both | 
 | 1259 | 	 * this_cpu and prev_cpu are present in: | 
 | 1260 | 	 */ | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1261 | 	for_each_domain(this_cpu, sd) { | 
| Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 1262 | 		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) { | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1263 | 			this_sd = sd; | 
 | 1264 | 			break; | 
 | 1265 | 		} | 
 | 1266 | 	} | 
 | 1267 |  | 
| Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1268 | 	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed))) | 
| Ingo Molnar | f482738 | 2008-03-16 21:21:47 +0100 | [diff] [blame] | 1269 | 		goto out; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1270 |  | 
 | 1271 | 	/* | 
 | 1272 | 	 * Check for affine wakeup and passive balancing possibilities. | 
 | 1273 | 	 */ | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1274 | 	if (!this_sd) | 
| Ingo Molnar | f482738 | 2008-03-16 21:21:47 +0100 | [diff] [blame] | 1275 | 		goto out; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1276 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1277 | 	idx = this_sd->wake_idx; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1278 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1279 | 	imbalance = 100 + (this_sd->imbalance_pct - 100) / 2; | 
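 |  | 	/* e.g. imbalance_pct == 125 yields 112, allowing ~12% extra load here */ | 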
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1280 |  | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1281 | 	load = source_load(prev_cpu, idx); | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1282 | 	this_load = target_load(this_cpu, idx); | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1283 |  | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1284 | 	if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx, | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1285 | 				     load, this_load, imbalance)) | 
 | 1286 | 		return this_cpu; | 
 | 1287 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1288 | 	/* | 
 | 1289 | 	 * Start passive balancing when half the imbalance_pct | 
 | 1290 | 	 * limit is reached. | 
 | 1291 | 	 */ | 
 | 1292 | 	if (this_sd->flags & SD_WAKE_BALANCE) { | 
 | 1293 | 		if (imbalance*this_load <= 100*load) { | 
 | 1294 | 			schedstat_inc(this_sd, ttwu_move_balance); | 
 | 1295 | 			schedstat_inc(p, se.nr_wakeups_passive); | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1296 | 			return this_cpu; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1297 | 		} | 
 | 1298 | 	} | 
 | 1299 |  | 
| Ingo Molnar | f482738 | 2008-03-16 21:21:47 +0100 | [diff] [blame] | 1300 | out: | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1301 | 	return wake_idle(new_cpu, p); | 
 | 1302 | } | 
 | 1303 | #endif /* CONFIG_SMP */ | 
 | 1304 |  | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1305 | static unsigned long wakeup_gran(struct sched_entity *se) | 
 | 1306 | { | 
 | 1307 | 	unsigned long gran = sysctl_sched_wakeup_granularity; | 
 | 1308 |  | 
 | 1309 | 	/* | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 1310 | 	 * More easily preempt '-nice' tasks, while not making it harder for | 
 | 1311 | 	 * '+nice' tasks. | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1312 | 	 */ | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1313 | 	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD) | 
 | 1314 | 		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se); | 
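 |  | 	/* in that case gran is scaled by roughly NICE_0_LOAD / se->load.weight */ | 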
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1315 |  | 
 | 1316 | 	return gran; | 
 | 1317 | } | 
 | 1318 |  | 
 | 1319 | /* | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1320 |  * Should 'se' preempt 'curr'? | 
 | 1321 |  * | 
 | 1322 |  *             |s1 | 
 | 1323 |  *        |s2 | 
 | 1324 |  *   |s3 | 
 | 1325 |  *         g | 
 | 1326 |  *      |<--->|c | 
 | 1327 |  * | 
 | 1328 |  *  w(c, s1) = -1 | 
 | 1329 |  *  w(c, s2) =  0 | 
 | 1330 |  *  w(c, s3) =  1 | 
 | 1331 |  * | 
 | 1332 |  * | 
 |  |  */ | 
 | 1333 | static int | 
 | 1334 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | 
 | 1335 | { | 
 | 1336 | 	s64 gran, vdiff = curr->vruntime - se->vruntime; | 
 | 1337 |  | 
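 |  | 	/* | 
 |  | 	 * Example with hypothetical numbers: curr->vruntime = 105ms and | 
 |  | 	 * se->vruntime = 100ms gives vdiff = 5ms; with a 5ms effective | 
 |  | 	 * granularity that is not > gran, so we return 0 and the caller | 
 |  | 	 * does not preempt. Only once se's vruntime lags curr's by more | 
 |  | 	 * than gran do we return 1. | 
 |  | 	 */ | 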
 | 1338 | 	if (vdiff <= 0) | 
 | 1339 | 		return -1; | 
 | 1340 |  | 
 | 1341 | 	gran = wakeup_gran(curr); | 
 | 1342 | 	if (vdiff > gran) | 
 | 1343 | 		return 1; | 
 | 1344 |  | 
 | 1345 | 	return 0; | 
 | 1346 | } | 
 | 1347 |  | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1348 | static void set_last_buddy(struct sched_entity *se) | 
 | 1349 | { | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1350 | 	if (likely(task_of(se)->policy != SCHED_IDLE)) { | 
 | 1351 | 		for_each_sched_entity(se) | 
 | 1352 | 			cfs_rq_of(se)->last = se; | 
 | 1353 | 	} | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1354 | } | 
 | 1355 |  | 
 | 1356 | static void set_next_buddy(struct sched_entity *se) | 
 | 1357 | { | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1358 | 	if (likely(task_of(se)->policy != SCHED_IDLE)) { | 
 | 1359 | 		for_each_sched_entity(se) | 
 | 1360 | 			cfs_rq_of(se)->next = se; | 
 | 1361 | 	} | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1362 | } | 
 | 1363 |  | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1364 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1365 |  * Preempt the current task with a newly woken task if needed: | 
 | 1366 |  */ | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1367 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1368 | { | 
 | 1369 | 	struct task_struct *curr = rq->curr; | 
| Srivatsa Vaddagiri | 8651a86 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1370 | 	struct sched_entity *se = &curr->se, *pse = &p->se; | 
| Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 1371 | 	struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
 | 1372 |  | 
 | 1373 | 	update_curr(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1374 |  | 
 | 1375 | 	if (unlikely(rt_prio(p->prio))) { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1376 | 		resched_task(curr); | 
 | 1377 | 		return; | 
 | 1378 | 	} | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1379 |  | 
| Peter Zijlstra | d95f98d | 2008-11-04 21:25:08 +0100 | [diff] [blame] | 1380 | 	if (unlikely(p->sched_class != &fair_sched_class)) | 
 | 1381 | 		return; | 
 | 1382 |  | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1383 | 	if (unlikely(se == pse)) | 
 | 1384 | 		return; | 
 | 1385 |  | 
| Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 1386 | 	/* | 
 | 1387 | 	 * Only set the backward buddy when the current task is still on the | 
 | 1388 | 	 * rq. This can happen when a wakeup gets interleaved with schedule on | 
 | 1389 | 	 * the ->pre_schedule() or idle_balance() point, either of which can | 
 | 1390 | 	 * drop the rq lock. | 
 | 1391 | 	 * | 
 | 1392 | 	 * Also, during early boot the idle thread is in the fair class; for | 
 | 1393 | 	 * obvious reasons it's a bad idea to schedule back to the idle thread. | 
 | 1394 | 	 */ | 
 | 1395 | 	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle)) | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1396 | 		set_last_buddy(se); | 
 | 1397 | 	set_next_buddy(pse); | 
| Peter Zijlstra | 57fdc26 | 2008-09-23 15:33:45 +0200 | [diff] [blame] | 1398 |  | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 1399 | 	/* | 
 | 1400 | 	 * We can come here with TIF_NEED_RESCHED already set from new task | 
 | 1401 | 	 * wake up path. | 
 | 1402 | 	 */ | 
 | 1403 | 	if (test_tsk_need_resched(curr)) | 
 | 1404 | 		return; | 
 | 1405 |  | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1406 | 	/* | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1407 | 	 * Batch and idle tasks do not preempt (their preemption is driven by | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1408 | 	 * the tick): | 
 | 1409 | 	 */ | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1410 | 	if (unlikely(p->policy != SCHED_NORMAL)) | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1411 | 		return; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1412 |  | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1413 | 	/* Idle tasks are by definition preempted by everybody. */ | 
 | 1414 | 	if (unlikely(curr->policy == SCHED_IDLE)) { | 
 | 1415 | 		resched_task(curr); | 
 | 1416 | 		return; | 
 | 1417 | 	} | 
 | 1418 |  | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1419 | 	if (!sched_feat(WAKEUP_PREEMPT)) | 
 | 1420 | 		return; | 
| Peter Zijlstra | ce6c131 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1421 |  | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 1422 | 	if (sched_feat(WAKEUP_OVERLAP) && (sync || | 
 | 1423 | 			(se->avg_overlap < sysctl_sched_migration_cost && | 
 | 1424 | 			 pse->avg_overlap < sysctl_sched_migration_cost))) { | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1425 | 		resched_task(curr); | 
 | 1426 | 		return; | 
 | 1427 | 	} | 
 | 1428 |  | 
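 |  | 	/* | 
 |  | 	 * With group scheduling, find_matching_se() walks both entities up | 
 |  | 	 * to a common cfs_rq so that their vruntimes are comparable in the | 
 |  | 	 * preemption test below. | 
 |  | 	 */ | 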
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1429 | 	find_matching_se(&se, &pse); | 
 | 1430 |  | 
 | 1431 | 	while (se) { | 
 | 1432 | 		BUG_ON(!pse); | 
 | 1433 |  | 
 | 1434 | 		if (wakeup_preempt_entity(se, pse) == 1) { | 
 | 1435 | 			resched_task(curr); | 
 | 1436 | 			break; | 
 | 1437 | 		} | 
 | 1438 |  | 
 | 1439 | 		se = parent_entity(se); | 
 | 1440 | 		pse = parent_entity(pse); | 
 | 1441 | 	} | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1442 | } | 
 | 1443 |  | 
| Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1444 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1445 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1446 | 	struct task_struct *p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1447 | 	struct cfs_rq *cfs_rq = &rq->cfs; | 
 | 1448 | 	struct sched_entity *se; | 
 | 1449 |  | 
 | 1450 | 	if (unlikely(!cfs_rq->nr_running)) | 
 | 1451 | 		return NULL; | 
 | 1452 |  | 
 | 1453 | 	do { | 
| Ingo Molnar | 9948f4b | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1454 | 		se = pick_next_entity(cfs_rq); | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1455 | 		set_next_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1456 | 		cfs_rq = group_cfs_rq(se); | 
 | 1457 | 	} while (cfs_rq); | 
 | 1458 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1459 | 	p = task_of(se); | 
 | 1460 | 	hrtick_start_fair(rq, p); | 
 | 1461 |  | 
 | 1462 | 	return p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1463 | } | 
 | 1464 |  | 
 | 1465 | /* | 
 | 1466 |  * Account for a descheduled task: | 
 | 1467 |  */ | 
| Ingo Molnar | 31ee529 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1468 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1469 | { | 
 | 1470 | 	struct sched_entity *se = &prev->se; | 
 | 1471 | 	struct cfs_rq *cfs_rq; | 
 | 1472 |  | 
 | 1473 | 	for_each_sched_entity(se) { | 
 | 1474 | 		cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1475 | 		put_prev_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1476 | 	} | 
 | 1477 | } | 
 | 1478 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1479 | #ifdef CONFIG_SMP | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1480 | /************************************************** | 
 | 1481 |  * Fair scheduling class load-balancing methods: | 
 | 1482 |  */ | 
 | 1483 |  | 
 | 1484 | /* | 
 | 1485 |  * Load-balancing iterator. Note: while the runqueue stays locked | 
 | 1486 |  * during the whole iteration, the current task might be | 
 | 1487 |  * dequeued so the iterator has to be dequeue-safe. Here we | 
 | 1488 |  * achieve that by always pre-iterating before returning | 
 | 1489 |  * the current task: | 
 | 1490 |  */ | 
| Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1491 | static struct task_struct * | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1492 | __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1493 | { | 
| Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 1494 | 	struct task_struct *p = NULL; | 
 | 1495 | 	struct sched_entity *se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1496 |  | 
| Mike Galbraith | 77ae651 | 2008-08-11 13:32:02 +0200 | [diff] [blame] | 1497 | 	if (next == &cfs_rq->tasks) | 
 | 1498 | 		return NULL; | 
 | 1499 |  | 
| Bharata B Rao | b87f172 | 2008-09-25 09:53:54 +0530 | [diff] [blame] | 1500 | 	se = list_entry(next, struct sched_entity, group_node); | 
 | 1501 | 	p = task_of(se); | 
 | 1502 | 	cfs_rq->balance_iterator = next->next; | 
| Mike Galbraith | 77ae651 | 2008-08-11 13:32:02 +0200 | [diff] [blame] | 1503 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1504 | 	return p; | 
 | 1505 | } | 
 | 1506 |  | 
 | 1507 | static struct task_struct *load_balance_start_fair(void *arg) | 
 | 1508 | { | 
 | 1509 | 	struct cfs_rq *cfs_rq = arg; | 
 | 1510 |  | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1511 | 	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1512 | } | 
 | 1513 |  | 
 | 1514 | static struct task_struct *load_balance_next_fair(void *arg) | 
 | 1515 | { | 
 | 1516 | 	struct cfs_rq *cfs_rq = arg; | 
 | 1517 |  | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1518 | 	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1519 | } | 
 | 1520 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1521 | static unsigned long | 
 | 1522 | __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1523 | 		unsigned long max_load_move, struct sched_domain *sd, | 
 | 1524 | 		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio, | 
 | 1525 | 		struct cfs_rq *cfs_rq) | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1526 | { | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1527 | 	struct rq_iterator cfs_rq_iterator; | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1528 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1529 | 	cfs_rq_iterator.start = load_balance_start_fair; | 
 | 1530 | 	cfs_rq_iterator.next = load_balance_next_fair; | 
 | 1531 | 	cfs_rq_iterator.arg = cfs_rq; | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1532 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1533 | 	return balance_tasks(this_rq, this_cpu, busiest, | 
 | 1534 | 			max_load_move, sd, idle, all_pinned, | 
 | 1535 | 			this_best_prio, &cfs_rq_iterator); | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1536 | } | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1537 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1538 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1539 | static unsigned long | 
 | 1540 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1541 | 		  unsigned long max_load_move, | 
 | 1542 | 		  struct sched_domain *sd, enum cpu_idle_type idle, | 
 | 1543 | 		  int *all_pinned, int *this_best_prio) | 
 | 1544 | { | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1545 | 	long rem_load_move = max_load_move; | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1546 | 	int busiest_cpu = cpu_of(busiest); | 
 | 1547 | 	struct task_group *tg; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1548 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1549 | 	rcu_read_lock(); | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1550 | 	update_h_load(busiest_cpu); | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1551 |  | 
| Chris Friesen | caea8a0 | 2008-09-22 11:06:09 -0600 | [diff] [blame] | 1552 | 	list_for_each_entry_rcu(tg, &task_groups, list) { | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1553 | 		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu]; | 
| Peter Zijlstra | 42a3ac7 | 2008-06-27 13:41:29 +0200 | [diff] [blame] | 1554 | 		unsigned long busiest_h_load = busiest_cfs_rq->h_load; | 
 | 1555 | 		unsigned long busiest_weight = busiest_cfs_rq->load.weight; | 
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1556 | 		u64 rem_load, moved_load; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1557 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1558 | 		/* | 
 | 1559 | 		 * empty group | 
 | 1560 | 		 */ | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1561 | 		if (!busiest_cfs_rq->task_weight) | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1562 | 			continue; | 
 | 1563 |  | 
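 |  | 		/* | 
 |  | 		 * busiest_h_load is this group's contribution to the root-level | 
 |  | 		 * load on the busiest cpu, while busiest_weight is the local | 
 |  | 		 * weight of its entities; scale the remaining (root-level) load | 
 |  | 		 * into group-local units before moving tasks, and back again | 
 |  | 		 * below. The +1 guards against division by zero. | 
 |  | 		 */ | 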
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1564 | 		rem_load = (u64)rem_load_move * busiest_weight; | 
 | 1565 | 		rem_load = div_u64(rem_load, busiest_h_load + 1); | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1566 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1567 | 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest, | 
| Srivatsa Vaddagiri | 53fecd8 | 2008-06-27 13:41:20 +0200 | [diff] [blame] | 1568 | 				rem_load, sd, idle, all_pinned, this_best_prio, | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1569 | 				tg->cfs_rq[busiest_cpu]); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1570 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1571 | 		if (!moved_load) | 
 | 1572 | 			continue; | 
 | 1573 |  | 
| Peter Zijlstra | 42a3ac7 | 2008-06-27 13:41:29 +0200 | [diff] [blame] | 1574 | 		moved_load *= busiest_h_load; | 
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1575 | 		moved_load = div_u64(moved_load, busiest_weight + 1); | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1576 |  | 
 | 1577 | 		rem_load_move -= moved_load; | 
 | 1578 | 		if (rem_load_move < 0) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1579 | 			break; | 
 | 1580 | 	} | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1581 | 	rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1582 |  | 
| Peter Williams | 4301065 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1583 | 	return max_load_move - rem_load_move; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1584 | } | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1585 | #else | 
 | 1586 | static unsigned long | 
 | 1587 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1588 | 		  unsigned long max_load_move, | 
 | 1589 | 		  struct sched_domain *sd, enum cpu_idle_type idle, | 
 | 1590 | 		  int *all_pinned, int *this_best_prio) | 
 | 1591 | { | 
 | 1592 | 	return __load_balance_fair(this_rq, this_cpu, busiest, | 
 | 1593 | 			max_load_move, sd, idle, all_pinned, | 
 | 1594 | 			this_best_prio, &busiest->cfs); | 
 | 1595 | } | 
 | 1596 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1597 |  | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1598 | static int | 
 | 1599 | move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1600 | 		   struct sched_domain *sd, enum cpu_idle_type idle) | 
 | 1601 | { | 
 | 1602 | 	struct cfs_rq *busy_cfs_rq; | 
 | 1603 | 	struct rq_iterator cfs_rq_iterator; | 
 | 1604 |  | 
 | 1605 | 	cfs_rq_iterator.start = load_balance_start_fair; | 
 | 1606 | 	cfs_rq_iterator.next = load_balance_next_fair; | 
 | 1607 |  | 
 | 1608 | 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { | 
 | 1609 | 		/* | 
 | 1610 | 		 * pass busy_cfs_rq argument into | 
 | 1611 | 		 * load_balance_[start|next]_fair iterators | 
 | 1612 | 		 */ | 
 | 1613 | 		cfs_rq_iterator.arg = busy_cfs_rq; | 
 | 1614 | 		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, | 
 | 1615 | 				       &cfs_rq_iterator)) | 
 | 1616 | 		    return 1; | 
 | 1617 | 	} | 
 | 1618 |  | 
 | 1619 | 	return 0; | 
 | 1620 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 1621 | #endif /* CONFIG_SMP */ | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1622 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1623 | /* | 
 | 1624 |  * scheduler tick hitting a task of our scheduling class: | 
 | 1625 |  */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1626 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1627 | { | 
 | 1628 | 	struct cfs_rq *cfs_rq; | 
 | 1629 | 	struct sched_entity *se = &curr->se; | 
 | 1630 |  | 
 | 1631 | 	for_each_sched_entity(se) { | 
 | 1632 | 		cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1633 | 		entity_tick(cfs_rq, se, queued); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1634 | 	} | 
 | 1635 | } | 
 | 1636 |  | 
 | 1637 | /* | 
 | 1638 |  * Share the fairness runtime between parent and child, so that the | 
 | 1639 |  * total amount of pressure on the CPU stays equal - new tasks | 
 | 1640 |  * get a chance to run but frequent forkers are not allowed to | 
 | 1641 |  * monopolize the CPU. Note: the parent runqueue is locked, | 
 | 1642 |  * the child is not running yet. | 
 | 1643 |  */ | 
| Ingo Molnar | ee0827d | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1644 | static void task_new_fair(struct rq *rq, struct task_struct *p) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1645 | { | 
 | 1646 | 	struct cfs_rq *cfs_rq = task_cfs_rq(p); | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1647 | 	struct sched_entity *se = &p->se, *curr = cfs_rq->curr; | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1648 | 	int this_cpu = smp_processor_id(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1649 |  | 
 | 1650 | 	sched_info_queued(p); | 
 | 1651 |  | 
| Ting Yang | 7109c44 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 1652 | 	update_curr(cfs_rq); | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1653 | 	place_entity(cfs_rq, se, 1); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1654 |  | 
| Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1655 | 	/* 'curr' will be NULL if the child belongs to a different group */ | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1656 | 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && | 
| Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1657 | 			curr && curr->vruntime < se->vruntime) { | 
| Dmitry Adamushko | 87fefa3 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1658 | 		/* | 
| Ingo Molnar | edcb60a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1659 | 		 * Upon rescheduling, sched_class::put_prev_task() will place | 
 | 1660 | 		 * 'current' within the tree based on its new key value. | 
 | 1661 | 		 */ | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1662 | 		swap(curr->vruntime, se->vruntime); | 
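 |  | 		/* | 
 |  | 		 * The child now has the smaller vruntime, so it is picked | 
 |  | 		 * before the parent (sched_child_runs_first). | 
 |  | 		 */ | 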
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 1663 | 		resched_task(rq->curr); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1664 | 	} | 
 | 1665 |  | 
| Srivatsa Vaddagiri | b9dca1e | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 1666 | 	enqueue_task_fair(rq, p, 0); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1667 | } | 
 | 1668 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1669 | /* | 
 | 1670 |  * Priority of the task has changed. Check to see if we preempt | 
 | 1671 |  * the current task. | 
 | 1672 |  */ | 
 | 1673 | static void prio_changed_fair(struct rq *rq, struct task_struct *p, | 
 | 1674 | 			      int oldprio, int running) | 
 | 1675 | { | 
 | 1676 | 	/* | 
 | 1677 | 	 * Reschedule if we are currently running on this runqueue and | 
 | 1678 | 	 * our priority decreased, or if we are not currently running on | 
 | 1679 | 	 * this runqueue and our priority is higher than the current's. | 
 | 1680 | 	 */ | 
 | 1681 | 	if (running) { | 
 | 1682 | 		if (p->prio > oldprio) | 
 | 1683 | 			resched_task(rq->curr); | 
 | 1684 | 	} else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1685 | 		check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1686 | } | 
 | 1687 |  | 
 | 1688 | /* | 
 | 1689 |  * We switched to the sched_fair class. | 
 | 1690 |  */ | 
 | 1691 | static void switched_to_fair(struct rq *rq, struct task_struct *p, | 
 | 1692 | 			     int running) | 
 | 1693 | { | 
 | 1694 | 	/* | 
 | 1695 | 	 * We were most likely switched from sched_rt, so | 
 | 1696 | 	 * kick off the schedule if running, otherwise just see | 
 | 1697 | 	 * if we can still preempt the current task. | 
 | 1698 | 	 */ | 
 | 1699 | 	if (running) | 
 | 1700 | 		resched_task(rq->curr); | 
 | 1701 | 	else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1702 | 		check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1703 | } | 
 | 1704 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1705 | /* Account for a task changing its policy or group. | 
 | 1706 |  * | 
 | 1707 |  * This routine is mostly called to set cfs_rq->curr field when a task | 
 | 1708 |  * migrates between groups/classes. | 
 | 1709 |  */ | 
 | 1710 | static void set_curr_task_fair(struct rq *rq) | 
 | 1711 | { | 
 | 1712 | 	struct sched_entity *se = &rq->curr->se; | 
 | 1713 |  | 
 | 1714 | 	for_each_sched_entity(se) | 
 | 1715 | 		set_next_entity(cfs_rq_of(se), se); | 
 | 1716 | } | 
 | 1717 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 1718 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 1719 | static void moved_group_fair(struct task_struct *p) | 
 | 1720 | { | 
 | 1721 | 	struct cfs_rq *cfs_rq = task_cfs_rq(p); | 
 | 1722 |  | 
 | 1723 | 	update_curr(cfs_rq); | 
 | 1724 | 	place_entity(cfs_rq, &p->se, 1); | 
 | 1725 | } | 
 | 1726 | #endif | 
 | 1727 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1728 | /* | 
 | 1729 |  * All the scheduling class methods: | 
 | 1730 |  */ | 
| Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1731 | static const struct sched_class fair_sched_class = { | 
 | 1732 | 	.next			= &idle_sched_class, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1733 | 	.enqueue_task		= enqueue_task_fair, | 
 | 1734 | 	.dequeue_task		= dequeue_task_fair, | 
 | 1735 | 	.yield_task		= yield_task_fair, | 
 | 1736 |  | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1737 | 	.check_preempt_curr	= check_preempt_wakeup, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1738 |  | 
 | 1739 | 	.pick_next_task		= pick_next_task_fair, | 
 | 1740 | 	.put_prev_task		= put_prev_task_fair, | 
 | 1741 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1742 | #ifdef CONFIG_SMP | 
| Li Zefan | 4ce72a2 | 2008-10-22 15:25:26 +0800 | [diff] [blame] | 1743 | 	.select_task_rq		= select_task_rq_fair, | 
 | 1744 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1745 | 	.load_balance		= load_balance_fair, | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1746 | 	.move_one_task		= move_one_task_fair, | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1747 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1748 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1749 | 	.set_curr_task          = set_curr_task_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1750 | 	.task_tick		= task_tick_fair, | 
 | 1751 | 	.task_new		= task_new_fair, | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1752 |  | 
 | 1753 | 	.prio_changed		= prio_changed_fair, | 
 | 1754 | 	.switched_to		= switched_to_fair, | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 1755 |  | 
 | 1756 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 1757 | 	.moved_group		= moved_group_fair, | 
 | 1758 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1759 | }; | 
 | 1760 |  | 
 | 1761 | #ifdef CONFIG_SCHED_DEBUG | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1762 | static void print_cfs_stats(struct seq_file *m, int cpu) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1763 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1764 | 	struct cfs_rq *cfs_rq; | 
 | 1765 |  | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1766 | 	rcu_read_lock(); | 
| Ingo Molnar | c3b64f1 | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1767 | 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1768 | 		print_cfs_rq(m, cpu, cfs_rq); | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1769 | 	rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1770 | } | 
 | 1771 | #endif |