/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 20000000ULL;
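
/*
 * Illustrative: assuming the boot-time scaling implied by the comment
 * above, a 4-CPU machine would run with a targeted latency of
 * 20ms * (1 + ilog(4)) = 60ms, i.e. every runnable task should get on
 * a CPU at least once per ~60ms under moderate load.
 */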

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 4000000ULL;

/*
 * sched_nr_latency is kept at
 * sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 5;
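
/*
 * Illustrative: with the (unscaled) defaults this is 20ms / 4ms = 5,
 * matching the initializer above; the sysctl handler further down
 * recomputes it whenever either knob is changed.
 */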

/*
 * After fork, the child runs first by default. If set to 0 then the
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 10000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
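
/*
 * Note (illustrative): both helpers compare u64 timestamps via a signed
 * 64-bit delta, so the comparison stays correct even if vruntime wraps
 * around u64 -- provided the two values are within 2^63 ns of each
 * other. This is the same trick the time_after() jiffies macros use.
 */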

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}
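
/*
 * Note (illustrative): the rbtree is keyed on vruntime relative to the
 * per-runqueue min_vruntime, not on absolute vruntime, so the signed
 * keys stay small and order correctly across u64 wraparound.
 */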

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *se = NULL;
	struct rb_node *parent;

	while (*link) {
		parent = *link;
		se = rb_entry(parent, struct sched_entity, run_node);
		link = &parent->rb_right;
	}

	return se;
}
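
/*
 * Note (illustrative): unlike the leftmost node, the rightmost entity
 * is not cached, so __pick_last_entity() walks the right spine of the
 * tree in O(log n); its few callers (entity placement, compat yield)
 * are not hot paths.
 */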

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have
 * to stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
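
/*
 * Worked example (illustrative, assuming the unscaled defaults of
 * l = 20ms, nl = 5): with 3 runnable tasks the period stays 20ms;
 * with 10 runnable tasks it is stretched to 10 * 4ms = 40ms, so each
 * task still gets at least the 4ms minimum granularity.
 */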

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*w/rw
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running);

	slice *= se->load.weight;
	do_div(slice, cfs_rq->load.weight);

	return slice;
}
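
/*
 * Worked example (illustrative): two runnable nice-0 tasks, each of
 * weight NICE_0_LOAD (1024), share a 20ms period, so each gets a 10ms
 * slice; if one of them were reniced so that its weight tripled, it
 * would get 20ms * 3072/4096 = 15ms and the other one 5ms.
 */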

/*
 * We calculate the vruntime slice.
 *
 * vs = s/w = p/rw
 */
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
	u64 vslice = __sched_period(nr_running);

	vslice *= NICE_0_LOAD;
	do_div(vslice, rq_weight);

	return vslice;
}

static u64 sched_vslice(struct cfs_rq *cfs_rq)
{
	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
}

static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
			cfs_rq->nr_running + 1);
}
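
/*
 * Note (illustrative): the vruntime slice is the wall slice expressed
 * in nice-0 units -- how far vruntime advances over one slice,
 * independent of the entity's own weight. E.g. a lone nice-0 task gets
 * vs = 20ms * 1024/1024 = 20ms; with two nice-0 tasks each gets
 * vs = 20ms * 1024/2048 = 10ms. sched_vslice_add() evaluates the same
 * expression as if 'se' were already enqueued.
 */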

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;
	u64 vruntime;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;

	/*
	 * maintain cfs_rq->min_vruntime to be a monotonically increasing
	 * value tracking the leftmost vruntime in the tree.
	 */
	if (first_fair(cfs_rq)) {
		vruntime = min_vruntime(curr->vruntime,
				__pick_next_entity(cfs_rq)->vruntime);
	} else
		vruntime = curr->vruntime;

	cfs_rq->min_vruntime =
		max_vruntime(cfs_rq->min_vruntime, vruntime);
}
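
/*
 * Worked example (illustrative): calc_delta_fair() scales roughly as
 * delta * NICE_0_LOAD / weight, so a nice-0 task's vruntime advances at
 * wall-clock rate, while a task of twice the weight that ran 10ms of
 * wall time is charged only ~5ms of vruntime -- which is what lets
 * heavier tasks stay at the left of the tree longer.
 */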

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		cpuacct_charge(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		account_scheduler_latency(tsk, delta >> 10, 1);
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {

			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
		account_scheduler_latency(tsk, delta >> 10, 0);
	}
#endif
}
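
/*
 * Note (illustrative): the delta >> 10 passed to latencytop is a cheap
 * approximate nanoseconds-to-microseconds conversion (divide by 1024),
 * just as the delta >> 20 above approximates milliseconds for the
 * sleep profiler.
 */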

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime;

	vruntime = cfs_rq->min_vruntime;

	if (sched_feat(TREE_AVG)) {
		struct sched_entity *last = __pick_last_entity(cfs_rq);
		if (last) {
			vruntime += last->vruntime;
			vruntime >>= 1;
		}
	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
		vruntime += sched_vslice(cfs_rq)/2;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice_add(cfs_rq, se);

	if (!initial) {
		/* sleeps up to a single latency don't count. */
		if (sched_feat(NEW_FAIR_SLEEPERS))
			vruntime -= sysctl_sched_latency;

		/* ensure we never gain time by being placed backwards. */
		vruntime = max_vruntime(se->vruntime, vruntime);
	}

	se->vruntime = vruntime;
}
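
/*
 * Worked example (illustrative): on fork with START_DEBIT, a child on a
 * runqueue at min_vruntime = 100ms is placed at 100ms plus its own
 * vslice, so it cannot immediately preempt the tasks already promised
 * the current period. On wakeup with NEW_FAIR_SLEEPERS, a task that
 * slept a long time is placed sysctl_sched_latency before min_vruntime
 * (a bounded credit), while the max_vruntime() clamp keeps a task that
 * slept only briefly at its old, higher vruntime.
 */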

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
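
/*
 * Worked example (illustrative): with two nice-0 tasks and a 20ms
 * period, ideal_runtime is 10ms; once the current task has executed
 * 10ms since it was last picked (prev_sum_exec_runtime is snapshotted
 * in set_next_entity() below), the next scheduler tick marks it for
 * rescheduling.
 */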

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = NULL;

	if (first_fair(cfs_rq)) {
		se = __pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
	}

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued)
		return resched_task(rq_of(cfs_rq)->curr);
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

#define GROUP_IMBALANCE_PCT	20

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	int requeue = rq->curr == p;
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (!requeue)
			delta = max(10000LL, delta);

		hrtick_start(rq, delta, requeue);
	}
}
#else
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}
#endif
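
/*
 * Worked example (illustrative): if a task's slice is 10ms and it has
 * already run 3ms of it, hrtick_start_fair() arms a high-resolution
 * timer 7ms out, so preemption lands at the slice boundary instead of
 * waiting for the next periodic tick.
 */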

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se,
			    *topse = NULL;	/* Highest schedulable entity */
	int incload = 1;

	for_each_sched_entity(se) {
		topse = se;
		if (se->on_rq) {
			incload = 0;
			break;
		}
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}
	/* Increment cpu load if we just enqueued the first task of a group on
	 * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
	 * at the highest grouping level.
	 */
	if (incload)
		inc_cpu_load(rq, topse->load.weight);

	hrtick_start_fair(rq, rq->curr);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se,
			    *topse = NULL;	/* Highest schedulable entity */
	int decload = 1;

	for_each_sched_entity(se) {
		topse = se;
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			if (parent_entity(se))
				decload = 0;
			break;
		}
		sleep = 1;
	}
	/* Decrement cpu load if we just dequeued the last task of a group on
	 * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
	 * at the highest grouping level.
	 */
	if (decload)
		dec_cpu_load(rq, topse->load.weight);

	hrtick_start_fair(rq, rq->curr);
}
 | 884 |  | 
 | 885 | /* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 886 |  * sched_yield() support is very simple - we dequeue and enqueue. | 
 | 887 |  * | 
 | 888 |  * If compat_yield is turned on then we requeue to the end of the tree. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 889 |  */ | 
| Dmitry Adamushko | 4530d7a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 890 | static void yield_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 891 | { | 
| Ingo Molnar | db292ca | 2007-12-04 17:04:39 +0100 | [diff] [blame] | 892 | 	struct task_struct *curr = rq->curr; | 
 | 893 | 	struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
 | 894 | 	struct sched_entity *rightmost, *se = &curr->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 895 |  | 
 | 896 | 	/* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 897 | 	 * Are we the only task in the tree? | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 898 | 	 */ | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 899 | 	if (unlikely(cfs_rq->nr_running == 1)) | 
 | 900 | 		return; | 
 | 901 |  | 
| Ingo Molnar | db292ca | 2007-12-04 17:04:39 +0100 | [diff] [blame] | 902 | 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) { | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 903 | 		__update_rq_clock(rq); | 
 | 904 | 		/* | 
| Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 905 | 		 * Update run-time statistics of the 'current'. | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 906 | 		 */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 907 | 		update_curr(cfs_rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 908 |  | 
 | 909 | 		return; | 
 | 910 | 	} | 
 | 911 | 	/* | 
 | 912 | 	 * Find the rightmost entry in the rbtree: | 
 | 913 | 	 */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 914 | 	rightmost = __pick_last_entity(cfs_rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 915 | 	/* | 
 | 916 | 	 * Already in the rightmost position? | 
 | 917 | 	 */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 918 | 	if (unlikely(rightmost->vruntime < se->vruntime)) | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 919 | 		return; | 
 | 920 |  | 
 | 921 | 	/* | 
 | 922 | 	 * Set the minimal key value necessary to be last in the tree: | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 923 | 	 * Upon rescheduling, sched_class::put_prev_task() will place | 
 | 924 | 	 * 'current' within the tree based on its new key value. | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 925 | 	 */ | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 926 | 	se->vruntime = rightmost->vruntime + 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 927 | } | 
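/*
 * Illustrative note (an addition, not original source): the compat
 * behaviour above is selected at run time via the sched_compat_yield
 * sysctl, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/sched_compat_yield
 *
 * With it enabled, the yielding task's vruntime is bumped one past the
 * rightmost entity's, so a subsequent put_prev_task() requeues it at
 * the far right of the rbtree instead of merely updating its runtime
 * statistics.
 */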
 | 928 |  | 
 | 929 | /* | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 930 |  * wake_idle() will wake a task on an idle cpu if task->cpu is | 
 | 931 |  * not idle and an idle cpu is available.  The span of cpus | 
 | 932 |  * searched starts with those closest and widens out as needed, | 
 | 933 |  * so we always favor a closer, idle cpu. | 
 | 934 |  * | 
 | 935 |  * Returns the CPU we should wake onto. | 
 | 936 |  */ | 
 | 937 | #if defined(ARCH_HAS_SCHED_WAKE_IDLE) | 
 | 938 | static int wake_idle(int cpu, struct task_struct *p) | 
 | 939 | { | 
 | 940 | 	cpumask_t tmp; | 
 | 941 | 	struct sched_domain *sd; | 
 | 942 | 	int i; | 
 | 943 |  | 
 | 944 | 	/* | 
 | 945 | 	 * If it is idle, then it is the best cpu to run this task. | 
 | 946 | 	 * | 
 | 947 | 	 * This cpu is also the best if it already has more than one task. | 
 | 948 | 	 * Its siblings must also be busy (in most cases), since they have | 
 | 949 | 	 * not picked up the extra load from this cpu, so we need not check | 
 | 950 | 	 * the sibling runqueue info. This avoids the checks and the cache | 
 | 951 | 	 * miss penalties associated with doing so. | 
 | 952 | 	 */ | 
 | 953 | 	if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1) | 
 | 954 | 		return cpu; | 
 | 955 |  | 
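	/*
	 * Walk up the domain hierarchy for as long as the domains have
	 * SD_WAKE_IDLE set, scanning each span (restricted to the
	 * task's cpus_allowed) for an idle cpu. Smaller, closer domains
	 * are visited first, so a nearby idle cpu wins:
	 */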
 | 956 | 	for_each_domain(cpu, sd) { | 
 | 957 | 		if (sd->flags & SD_WAKE_IDLE) { | 
 | 958 | 			cpus_and(tmp, sd->span, p->cpus_allowed); | 
 | 959 | 			for_each_cpu_mask(i, tmp) { | 
 | 960 | 				if (idle_cpu(i)) { | 
 | 961 | 					if (i != task_cpu(p)) { | 
 | 962 | 						schedstat_inc(p, | 
 | 963 | 						       se.nr_wakeups_idle); | 
 | 964 | 					} | 
 | 965 | 					return i; | 
 | 966 | 				} | 
 | 967 | 			} | 
 | 968 | 		} else { | 
 | 969 | 			break; | 
 | 970 | 		} | 
 | 971 | 	} | 
 | 972 | 	return cpu; | 
 | 973 | } | 
 | 974 | #else | 
 | 975 | static inline int wake_idle(int cpu, struct task_struct *p) | 
 | 976 | { | 
 | 977 | 	return cpu; | 
 | 978 | } | 
 | 979 | #endif | 
 | 980 |  | 
 | 981 | #ifdef CONFIG_SMP | 
 | 982 | static int select_task_rq_fair(struct task_struct *p, int sync) | 
 | 983 | { | 
 | 984 | 	int cpu, this_cpu; | 
 | 985 | 	struct rq *rq; | 
 | 986 | 	struct sched_domain *sd, *this_sd = NULL; | 
 | 987 | 	int new_cpu; | 
 | 988 |  | 
 | 989 | 	cpu      = task_cpu(p); | 
 | 990 | 	rq       = task_rq(p); | 
 | 991 | 	this_cpu = smp_processor_id(); | 
 | 992 | 	new_cpu  = cpu; | 
 | 993 |  | 
| Dmitry Adamushko | 9ec3b77 | 2008-01-25 21:08:21 +0100 | [diff] [blame] | 994 | 	if (cpu == this_cpu) | 
 | 995 | 		goto out_set_cpu; | 
 | 996 |  | 
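	/*
	 * Find the lowest sched domain on this_cpu whose span also
	 * contains the task's previous cpu; the affine-wakeup and
	 * passive-balancing decisions below are made within it:
	 */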
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 997 | 	for_each_domain(this_cpu, sd) { | 
 | 998 | 		if (cpu_isset(cpu, sd->span)) { | 
 | 999 | 			this_sd = sd; | 
 | 1000 | 			break; | 
 | 1001 | 		} | 
 | 1002 | 	} | 
 | 1003 |  | 
 | 1004 | 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) | 
 | 1005 | 		goto out_set_cpu; | 
 | 1006 |  | 
 | 1007 | 	/* | 
 | 1008 | 	 * Check for affine wakeup and passive balancing possibilities. | 
 | 1009 | 	 */ | 
 | 1010 | 	if (this_sd) { | 
 | 1011 | 		int idx = this_sd->wake_idx; | 
 | 1012 | 		unsigned int imbalance; | 
 | 1013 | 		unsigned long load, this_load; | 
 | 1014 |  | 
 | 1015 | 		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2; | 
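		/*
		 * E.g. (illustrative numbers): with imbalance_pct == 125,
		 * imbalance == 100 + (125 - 100) / 2 == 112, i.e. half of
		 * the domain's configured imbalance limit.
		 */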
 | 1016 |  | 
 | 1017 | 		load = source_load(cpu, idx); | 
 | 1018 | 		this_load = target_load(this_cpu, idx); | 
 | 1019 |  | 
 | 1020 | 		new_cpu = this_cpu; /* Wake to this CPU if we can */ | 
 | 1021 |  | 
 | 1022 | 		if (this_sd->flags & SD_WAKE_AFFINE) { | 
 | 1023 | 			unsigned long tl = this_load; | 
 | 1024 | 			unsigned long tl_per_task; | 
 | 1025 |  | 
 | 1026 | 			/* | 
 | 1027 | 			 * Attract cache-cold tasks on sync wakeups: | 
 | 1028 | 			 */ | 
 | 1029 | 			if (sync && !task_hot(p, rq->clock, this_sd)) | 
 | 1030 | 				goto out_set_cpu; | 
 | 1031 |  | 
 | 1032 | 			schedstat_inc(p, se.nr_wakeups_affine_attempts); | 
 | 1033 | 			tl_per_task = cpu_avg_load_per_task(this_cpu); | 
 | 1034 |  | 
 | 1035 | 			/* | 
 | 1036 | 			 * If sync wakeup then subtract the (maximum possible) | 
 | 1037 | 			 * effect of the currently running task from the load | 
 | 1038 | 			 * of the current CPU: | 
 | 1039 | 			 */ | 
 | 1040 | 			if (sync) | 
 | 1041 | 				tl -= current->se.load.weight; | 
 | 1042 |  | 
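			/*
			 * Allow the affine move if either this cpu is no
			 * busier than the task's old cpu and their summed
			 * load fits within one average task's load, or
			 * pulling the task here keeps this cpu within the
			 * halved imbalance limit computed above:
			 */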
 | 1043 | 			if ((tl <= load && | 
 | 1044 | 				tl + target_load(cpu, idx) <= tl_per_task) || | 
 | 1045 | 			       100*(tl + p->se.load.weight) <= imbalance*load) { | 
 | 1046 | 				/* | 
 | 1047 | 				 * This domain has SD_WAKE_AFFINE and | 
 | 1048 | 				 * p is cache cold in this domain, and | 
 | 1049 | 				 * there is no bad imbalance. | 
 | 1050 | 				 */ | 
 | 1051 | 				schedstat_inc(this_sd, ttwu_move_affine); | 
 | 1052 | 				schedstat_inc(p, se.nr_wakeups_affine); | 
 | 1053 | 				goto out_set_cpu; | 
 | 1054 | 			} | 
 | 1055 | 		} | 
 | 1056 |  | 
 | 1057 | 		/* | 
 | 1058 | 		 * Start passive balancing when half the imbalance_pct | 
 | 1059 | 		 * limit is reached. | 
 | 1060 | 		 */ | 
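		/*
		 * E.g. (illustrative numbers): with imbalance == 112, the
		 * test below passes once 112 * this_load <= 100 * load,
		 * i.e. the task's old cpu carries at least 12% more load
		 * than this one.
		 */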
 | 1061 | 		if (this_sd->flags & SD_WAKE_BALANCE) { | 
 | 1062 | 			if (imbalance*this_load <= 100*load) { | 
 | 1063 | 				schedstat_inc(this_sd, ttwu_move_balance); | 
 | 1064 | 				schedstat_inc(p, se.nr_wakeups_passive); | 
 | 1065 | 				goto out_set_cpu; | 
 | 1066 | 			} | 
 | 1067 | 		} | 
 | 1068 | 	} | 
 | 1069 |  | 
 | 1070 | 	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */ | 
 | 1071 | out_set_cpu: | 
 | 1072 | 	return wake_idle(new_cpu, p); | 
 | 1073 | } | 
 | 1074 | #endif /* CONFIG_SMP */ | 
 | 1075 |  | 
 | 1076 |  | 
 | 1077 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1078 |  * Preempt the current task with a newly woken task if needed: | 
 | 1079 |  */ | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1080 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1081 | { | 
 | 1082 | 	struct task_struct *curr = rq->curr; | 
| Srivatsa Vaddagiri | fad095a | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1083 | 	struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
| Srivatsa Vaddagiri | 8651a86 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1084 | 	struct sched_entity *se = &curr->se, *pse = &p->se; | 
| Ingo Molnar | 502d26b | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1085 | 	unsigned long gran; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1086 |  | 
 | 1087 | 	if (unlikely(rt_prio(p->prio))) { | 
| Ingo Molnar | a8e504d | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1088 | 		update_rq_clock(rq); | 
| Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1089 | 		update_curr(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1090 | 		resched_task(curr); | 
 | 1091 | 		return; | 
 | 1092 | 	} | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1093 | 	/* | 
 | 1094 | 	 * Batch tasks do not preempt (their preemption is driven by | 
 | 1095 | 	 * the tick): | 
 | 1096 | 	 */ | 
 | 1097 | 	if (unlikely(p->policy == SCHED_BATCH)) | 
 | 1098 | 		return; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1099 |  | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1100 | 	if (!sched_feat(WAKEUP_PREEMPT)) | 
 | 1101 | 		return; | 
| Peter Zijlstra | ce6c131 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1102 |  | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1103 | 	while (!is_same_group(se, pse)) { | 
 | 1104 | 		se = parent_entity(se); | 
 | 1105 | 		pse = parent_entity(pse); | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1106 | 	} | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1107 |  | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1108 | 	gran = sysctl_sched_wakeup_granularity; | 
| Peter Zijlstra | ef9884e | 2008-01-31 22:45:22 +0100 | [diff] [blame] | 1109 | 	/* | 
 | 1110 | 	 * Preempt negatively-niced (heavier) tasks more easily, while | 
 | 1111 | 	 * not making it harder for positively-niced tasks. | 
 | 1112 | 	 */ | 
 | 1113 | 	if (unlikely(se->load.weight > NICE_0_LOAD)) | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1114 | 		gran = calc_delta_fair(gran, &se->load); | 
 | 1115 |  | 
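	/*
	 * Preempt only when the woken entity's vruntime is more than
	 * 'gran' below the current entity's:
	 */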
| Ingo Molnar | 502d26b | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1116 | 	if (pse->vruntime + gran < se->vruntime) | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1117 | 		resched_task(curr); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1118 | } | 
 | 1119 |  | 
| Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1120 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1121 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1122 | 	struct task_struct *p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1123 | 	struct cfs_rq *cfs_rq = &rq->cfs; | 
 | 1124 | 	struct sched_entity *se; | 
 | 1125 |  | 
 | 1126 | 	if (unlikely(!cfs_rq->nr_running)) | 
 | 1127 | 		return NULL; | 
 | 1128 |  | 
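	/*
	 * Descend the group hierarchy, picking the best entity at each
	 * level, until we reach an entity with no child cfs_rq, i.e. an
	 * actual task:
	 */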
 | 1129 | 	do { | 
| Ingo Molnar | 9948f4b | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1130 | 		se = pick_next_entity(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1131 | 		cfs_rq = group_cfs_rq(se); | 
 | 1132 | 	} while (cfs_rq); | 
 | 1133 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1134 | 	p = task_of(se); | 
 | 1135 | 	hrtick_start_fair(rq, p); | 
 | 1136 |  | 
 | 1137 | 	return p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1138 | } | 
 | 1139 |  | 
 | 1140 | /* | 
 | 1141 |  * Account for a descheduled task: | 
 | 1142 |  */ | 
| Ingo Molnar | 31ee529 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1143 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1144 | { | 
 | 1145 | 	struct sched_entity *se = &prev->se; | 
 | 1146 | 	struct cfs_rq *cfs_rq; | 
 | 1147 |  | 
 | 1148 | 	for_each_sched_entity(se) { | 
 | 1149 | 		cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1150 | 		put_prev_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1151 | 	} | 
 | 1152 | } | 
 | 1153 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1154 | #ifdef CONFIG_SMP | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1155 | /************************************************** | 
 | 1156 |  * Fair scheduling class load-balancing methods: | 
 | 1157 |  */ | 
 | 1158 |  | 
 | 1159 | /* | 
 | 1160 |  * Load-balancing iterator. Note: while the runqueue stays locked | 
 | 1161 |  * during the whole iteration, the current task might be | 
 | 1162 |  * dequeued so the iterator has to be dequeue-safe. Here we | 
 | 1163 |  * achieve that by always advancing to the next node before | 
 | 1164 |  * returning the current task: | 
 | 1165 |  */ | 
| Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1166 | static struct task_struct * | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1167 | __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr) | 
 | 1168 | { | 
 | 1169 | 	struct task_struct *p; | 
 | 1170 |  | 
 | 1171 | 	if (!curr) | 
 | 1172 | 		return NULL; | 
 | 1173 |  | 
 | 1174 | 	p = rb_entry(curr, struct task_struct, se.run_node); | 
 | 1175 | 	cfs_rq->rb_load_balance_curr = rb_next(curr); | 
 | 1176 |  | 
 | 1177 | 	return p; | 
 | 1178 | } | 
 | 1179 |  | 
 | 1180 | static struct task_struct *load_balance_start_fair(void *arg) | 
 | 1181 | { | 
 | 1182 | 	struct cfs_rq *cfs_rq = arg; | 
 | 1183 |  | 
 | 1184 | 	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq)); | 
 | 1185 | } | 
 | 1186 |  | 
 | 1187 | static struct task_struct *load_balance_next_fair(void *arg) | 
 | 1188 | { | 
 | 1189 | 	struct cfs_rq *cfs_rq = arg; | 
 | 1190 |  | 
 | 1191 | 	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr); | 
 | 1192 | } | 
 | 1193 |  | 
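/*
 * Illustrative sketch (an addition, not original source): a caller
 * that holds the runqueue lock drives the two hooks above through a
 * struct rq_iterator roughly as follows ('visit' is a hypothetical
 * per-task hook):
 *
 *	struct rq_iterator it = {
 *		.start	= load_balance_start_fair,
 *		.next	= load_balance_next_fair,
 *		.arg	= cfs_rq,
 *	};
 *	struct task_struct *p;
 *
 *	for (p = it.start(it.arg); p; p = it.next(it.arg))
 *		visit(p);
 *
 * Because __load_balance_iterator() advances before returning, the
 * task just returned may safely be dequeued without breaking the walk.
 */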
| Peter Williams | 4301065 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1194 | static unsigned long | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1195 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1196 | 		  unsigned long max_load_move, | 
| Peter Williams | a4ac01c | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1197 | 		  struct sched_domain *sd, enum cpu_idle_type idle, | 
 | 1198 | 		  int *all_pinned, int *this_best_prio) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1199 | { | 
 | 1200 | 	struct cfs_rq *busy_cfs_rq; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1201 | 	long rem_load_move = max_load_move; | 
 | 1202 | 	struct rq_iterator cfs_rq_iterator; | 
| Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 1203 | 	unsigned long load_moved; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1204 |  | 
 | 1205 | 	cfs_rq_iterator.start = load_balance_start_fair; | 
 | 1206 | 	cfs_rq_iterator.next = load_balance_next_fair; | 
 | 1207 |  | 
 | 1208 | 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { | 
| Peter Williams | a4ac01c | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1209 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 1210 | 		struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu]; | 
 | 1211 | 		unsigned long maxload, task_load, group_weight; | 
 | 1212 | 		unsigned long thisload, per_task_load; | 
 | 1213 | 		struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu]; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1214 |  | 
| Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 1215 | 		task_load = busy_cfs_rq->load.weight; | 
 | 1216 | 		group_weight = se->load.weight; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1217 |  | 
| Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 1218 | 		/* | 
 | 1219 | 		 * 'group_weight' is contributed by tasks of total weight | 
 | 1220 | 		 * 'task_load'. To move 'rem_load_move' worth of weight only, | 
 | 1221 | 		 * we need to move a maximum task load of: | 
 | 1222 | 		 * | 
 | 1223 | 		 * 	maxload = (rem_load_move / group_weight) * task_load; | 
 | 1224 | 		 */ | 
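		/*
		 * E.g. (illustrative numbers): with group_weight == 2048,
		 * task_load == 3072 and rem_load_move == 1024, maxload
		 * works out to (1024 * 3072) / 2048 == 1536.
		 */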
 | 1225 | 		maxload = (rem_load_move * task_load) / group_weight; | 
 | 1226 |  | 
 | 1227 | 		if (!maxload || !task_load) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1228 | 			continue; | 
 | 1229 |  | 
| Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 1230 | 		per_task_load = task_load / busy_cfs_rq->nr_running; | 
 | 1231 | 		/* | 
 | 1232 | 		 * balance_tasks will try to forcibly move at least one task if | 
 | 1233 | 		 * possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if | 
 | 1234 | 		 * maxload is less than GROUP_IMBALANCE_PCT% of per_task_load. | 
 | 1235 | 		 */ | 
 | 1236 | 		if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load) | 
 | 1237 | 			continue; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1238 |  | 
| Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 1239 | 		/* Disable priority-based load balancing */ | 
 | 1240 | 		*this_best_prio = 0; | 
 | 1241 | 		thisload = this_cfs_rq->load.weight; | 
| Peter Williams | a4ac01c | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1242 | #else | 
| Ingo Molnar | e56f31a | 2007-08-10 23:05:11 +0200 | [diff] [blame] | 1243 | # define maxload rem_load_move | 
| Peter Williams | a4ac01c | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1244 | #endif | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1245 | 		/* | 
 | 1246 | 		 * pass busy_cfs_rq argument into | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1247 | 		 * load_balance_[start|next]_fair iterators | 
 | 1248 | 		 */ | 
 | 1249 | 		cfs_rq_iterator.arg = busy_cfs_rq; | 
| Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 1250 | 		load_moved = balance_tasks(this_rq, this_cpu, busiest, | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1251 | 					       maxload, sd, idle, all_pinned, | 
 | 1252 | 					       this_best_prio, | 
 | 1253 | 					       &cfs_rq_iterator); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1254 |  | 
| Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 1255 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 1256 | 		/* | 
 | 1257 | 		 * load_moved holds the task load that was moved. The | 
 | 1258 | 		 * effective (group) weight moved would be: | 
 | 1259 | 		 * 	load_moved_eff = load_moved/task_load * group_weight; | 
 | 1260 | 		 */ | 
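		/*
		 * E.g. (illustrative numbers): continuing the example
		 * above, moving load_moved == 1536 of task load counts
		 * as (2048 * 1536) / 3072 == 1024 of group weight -
		 * exactly the rem_load_move we set out to move.
		 */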
 | 1261 | 		load_moved = (group_weight * load_moved) / task_load; | 
 | 1262 |  | 
 | 1263 | 		/* Adjust shares on both cpus to reflect load_moved */ | 
 | 1264 | 		group_weight -= load_moved; | 
 | 1265 | 		set_se_shares(se, group_weight); | 
 | 1266 |  | 
 | 1267 | 		se = busy_cfs_rq->tg->se[this_cpu]; | 
 | 1268 | 		if (!thisload) | 
 | 1269 | 			group_weight = load_moved; | 
 | 1270 | 		else | 
 | 1271 | 			group_weight = se->load.weight + load_moved; | 
 | 1272 | 		set_se_shares(se, group_weight); | 
 | 1273 | #endif | 
 | 1274 |  | 
 | 1275 | 		rem_load_move -= load_moved; | 
 | 1276 |  | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1277 | 		if (rem_load_move <= 0) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1278 | 			break; | 
 | 1279 | 	} | 
 | 1280 |  | 
| Peter Williams | 4301065 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1281 | 	return max_load_move - rem_load_move; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1282 | } | 
 | 1283 |  | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1284 | static int | 
 | 1285 | move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
 | 1286 | 		   struct sched_domain *sd, enum cpu_idle_type idle) | 
 | 1287 | { | 
 | 1288 | 	struct cfs_rq *busy_cfs_rq; | 
 | 1289 | 	struct rq_iterator cfs_rq_iterator; | 
 | 1290 |  | 
 | 1291 | 	cfs_rq_iterator.start = load_balance_start_fair; | 
 | 1292 | 	cfs_rq_iterator.next = load_balance_next_fair; | 
 | 1293 |  | 
 | 1294 | 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { | 
 | 1295 | 		/* | 
 | 1296 | 		 * pass busy_cfs_rq argument into | 
 | 1297 | 		 * load_balance_[start|next]_fair iterators | 
 | 1298 | 		 */ | 
 | 1299 | 		cfs_rq_iterator.arg = busy_cfs_rq; | 
 | 1300 | 		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, | 
 | 1301 | 				       &cfs_rq_iterator)) | 
 | 1302 | 			return 1; | 
 | 1303 | 	} | 
 | 1304 |  | 
 | 1305 | 	return 0; | 
 | 1306 | } | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1307 | #endif | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1308 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1309 | /* | 
 | 1310 |  * scheduler tick hitting a task of our scheduling class: | 
 | 1311 |  */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1312 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1313 | { | 
 | 1314 | 	struct cfs_rq *cfs_rq; | 
 | 1315 | 	struct sched_entity *se = &curr->se; | 
 | 1316 |  | 
 | 1317 | 	for_each_sched_entity(se) { | 
 | 1318 | 		cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1319 | 		entity_tick(cfs_rq, se, queued); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1320 | 	} | 
 | 1321 | } | 
 | 1322 |  | 
| Ingo Molnar | 8eb172d | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 1323 | #define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0) | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1324 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1325 | /* | 
 | 1326 |  * Share the fairness runtime between parent and child, so the | 
 | 1327 |  * total amount of CPU pressure stays equal - new tasks get a | 
 | 1328 |  * chance to run, but frequent forkers are not allowed to | 
 | 1329 |  * monopolize the CPU. Note: the parent runqueue is locked and | 
 | 1330 |  * the child is not running yet. | 
 | 1331 |  */ | 
| Ingo Molnar | ee0827d | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1332 | static void task_new_fair(struct rq *rq, struct task_struct *p) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1333 | { | 
 | 1334 | 	struct cfs_rq *cfs_rq = task_cfs_rq(p); | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1335 | 	struct sched_entity *se = &p->se, *curr = cfs_rq->curr; | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1336 | 	int this_cpu = smp_processor_id(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1337 |  | 
 | 1338 | 	sched_info_queued(p); | 
 | 1339 |  | 
| Ting Yang | 7109c44 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 1340 | 	update_curr(cfs_rq); | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1341 | 	place_entity(cfs_rq, se, 1); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1342 |  | 
| Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1343 | 	/* 'curr' will be NULL if the child belongs to a different group */ | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1344 | 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && | 
| Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1345 | 			curr && curr->vruntime < se->vruntime) { | 
| Dmitry Adamushko | 87fefa3 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1346 | 		/* | 
| Ingo Molnar | edcb60a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1347 | 		 * Upon rescheduling, sched_class::put_prev_task() will place | 
 | 1348 | 		 * 'current' within the tree based on its new key value. | 
 | 1349 | 		 */ | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1350 | 		swap(curr->vruntime, se->vruntime); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1351 | 	} | 
 | 1352 |  | 
| Srivatsa Vaddagiri | b9dca1e | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 1353 | 	enqueue_task_fair(rq, p, 0); | 
| Ingo Molnar | bb61c21 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 1354 | 	resched_task(rq->curr); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1355 | } | 
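/*
 * Illustrative note (an addition, not original source): the vruntime
 * swap above fires only when the sched_child_runs_first sysctl is set
 * and the child lands on the parent's cpu; it makes the child the
 * leftmost of the two, so the resched_task() above preempts the parent
 * and lets the child run first.
 */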
 | 1356 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1357 | /* | 
 | 1358 |  * Priority of the task has changed. Check to see if we preempt | 
 | 1359 |  * the current task. | 
 | 1360 |  */ | 
 | 1361 | static void prio_changed_fair(struct rq *rq, struct task_struct *p, | 
 | 1362 | 			      int oldprio, int running) | 
 | 1363 | { | 
 | 1364 | 	/* | 
 | 1365 | 	 * Reschedule if we are currently running on this runqueue and | 
 | 1366 | 	 * our priority decreased, or if we are not currently running on | 
 | 1367 | 	 * this runqueue and our priority is higher than the current task's. | 
 | 1368 | 	 */ | 
 | 1369 | 	if (running) { | 
 | 1370 | 		if (p->prio > oldprio) | 
 | 1371 | 			resched_task(rq->curr); | 
 | 1372 | 	} else | 
 | 1373 | 		check_preempt_curr(rq, p); | 
 | 1374 | } | 
 | 1375 |  | 
 | 1376 | /* | 
 | 1377 |  * We switched to the sched_fair class. | 
 | 1378 |  */ | 
 | 1379 | static void switched_to_fair(struct rq *rq, struct task_struct *p, | 
 | 1380 | 			     int running) | 
 | 1381 | { | 
 | 1382 | 	/* | 
 | 1383 | 	 * We were most likely switched from sched_rt, so kick off a | 
 | 1384 | 	 * reschedule if we are running; otherwise just see if we can | 
 | 1385 | 	 * still preempt the current task. | 
 | 1386 | 	 */ | 
 | 1387 | 	if (running) | 
 | 1388 | 		resched_task(rq->curr); | 
 | 1389 | 	else | 
 | 1390 | 		check_preempt_curr(rq, p); | 
 | 1391 | } | 
 | 1392 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1393 | /* Account for a task changing its policy or group. | 
 | 1394 |  * | 
 | 1395 |  * This routine is mostly called to set cfs_rq->curr field when a task | 
 | 1396 |  * migrates between groups/classes. | 
 | 1397 |  */ | 
 | 1398 | static void set_curr_task_fair(struct rq *rq) | 
 | 1399 | { | 
 | 1400 | 	struct sched_entity *se = &rq->curr->se; | 
 | 1401 |  | 
 | 1402 | 	for_each_sched_entity(se) | 
 | 1403 | 		set_next_entity(cfs_rq_of(se), se); | 
 | 1404 | } | 
 | 1405 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1406 | /* | 
 | 1407 |  * All the scheduling class methods: | 
 | 1408 |  */ | 
| Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1409 | static const struct sched_class fair_sched_class = { | 
 | 1410 | 	.next			= &idle_sched_class, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1411 | 	.enqueue_task		= enqueue_task_fair, | 
 | 1412 | 	.dequeue_task		= dequeue_task_fair, | 
 | 1413 | 	.yield_task		= yield_task_fair, | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1414 | #ifdef CONFIG_SMP | 
 | 1415 | 	.select_task_rq		= select_task_rq_fair, | 
 | 1416 | #endif /* CONFIG_SMP */ | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1417 |  | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1418 | 	.check_preempt_curr	= check_preempt_wakeup, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1419 |  | 
 | 1420 | 	.pick_next_task		= pick_next_task_fair, | 
 | 1421 | 	.put_prev_task		= put_prev_task_fair, | 
 | 1422 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1423 | #ifdef CONFIG_SMP | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1424 | 	.load_balance		= load_balance_fair, | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1425 | 	.move_one_task		= move_one_task_fair, | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1426 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1427 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1428 | 	.set_curr_task          = set_curr_task_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1429 | 	.task_tick		= task_tick_fair, | 
 | 1430 | 	.task_new		= task_new_fair, | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1431 |  | 
 | 1432 | 	.prio_changed		= prio_changed_fair, | 
 | 1433 | 	.switched_to		= switched_to_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1434 | }; | 
 | 1435 |  | 
 | 1436 | #ifdef CONFIG_SCHED_DEBUG | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1437 | static void print_cfs_stats(struct seq_file *m, int cpu) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1438 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1439 | 	struct cfs_rq *cfs_rq; | 
 | 1440 |  | 
| Srivatsa Vaddagiri | 75c28ac | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 1441 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
 | 1442 | 	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs); | 
 | 1443 | #endif | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1444 | 	rcu_read_lock(); | 
| Ingo Molnar | c3b64f1 | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1445 | 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1446 | 		print_cfs_rq(m, cpu, cfs_rq); | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1447 | 	rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1448 | } | 
 | 1449 | #endif |