/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 20000000ULL;
| Ingo Molnar | 2bd8e6d | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 38 |  | 
|  | 39 | /* | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 40 | * Minimal preemption granularity for CPU-bound tasks: | 
| Zou Nan hai | 722aab0 | 2007-11-26 21:21:49 +0100 | [diff] [blame] | 41 | * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds) | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 42 | */ | 
| Zou Nan hai | 722aab0 | 2007-11-26 21:21:49 +0100 | [diff] [blame] | 43 | unsigned int sysctl_sched_min_granularity = 4000000ULL; | 
| Peter Zijlstra | b2be5e9 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 44 |  | 
|  | 45 | /* | 
|  | 46 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity | 
|  | 47 | */ | 
| Zou Nan hai | 722aab0 | 2007-11-26 21:21:49 +0100 | [diff] [blame] | 48 | static unsigned int sched_nr_latency = 5; | 
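
/*
 * Worked example, assuming the untuned defaults above (before the
 * boot-time ilog(ncpus) scaling): sysctl_sched_latency = 20ms and
 * sysctl_sched_min_granularity = 4ms give sched_nr_latency = 20/4 = 5,
 * i.e. up to 5 runnable tasks still fit in one latency period.
 */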

/*
 * After fork, child runs first (default). If set to 0 then
 * the parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 5000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

#else   /* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta > 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}
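
/*
 * Note: comparing via a signed difference, rather than comparing
 * vruntime and min_vruntime directly, keeps the ordering decisions
 * correct even once the unsigned 64-bit vruntime values eventually
 * wrap around.
 */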

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return se->vruntime - cfs_rq->min_vruntime;
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = entity_key(cfs_rq, se);
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key < entity_key(cfs_rq, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost) {
                cfs_rq->rb_leftmost = &se->run_node;
                /*
                 * maintain cfs_rq->min_vruntime to be a monotonically
                 * increasing value tracking the leftmost vruntime in the tree.
                 */
                cfs_rq->min_vruntime =
                        max_vruntime(cfs_rq->min_vruntime, se->vruntime);
        }

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;
                struct sched_entity *next;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;

                if (next_node) {
                        next = rb_entry(next_node,
                                        struct sched_entity, run_node);
                        cfs_rq->min_vruntime =
                                max_vruntime(cfs_rq->min_vruntime,
                                             next->vruntime);
                }
        }

        if (cfs_rq->next == se)
                cfs_rq->next = NULL;

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}
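
/*
 * Since entities are keyed by vruntime, the cached leftmost node is
 * the entity with the smallest vruntime (least weighted CPU time so
 * far), making __pick_next_entity() O(1); the rightmost node is only
 * needed by the compat-yield path further down.
 */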

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
                struct file *filp, void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

        return 0;
}
#endif
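
/*
 * With CONFIG_SCHED_DEBUG the latency/granularity knobs are exposed as
 * sysctls (typically /proc/sys/kernel/sched_latency_ns and
 * sched_min_granularity_ns); writes go through the handler above so
 * that sched_nr_latency is recomputed to match the new ratio.
 */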

/*
 * delta *= P[w / rw]
 */
static inline unsigned long
calc_delta_weight(unsigned long delta, struct sched_entity *se)
{
        for_each_sched_entity(se) {
                delta = calc_delta_mine(delta,
                                se->load.weight, &cfs_rq_of(se)->load);
        }

        return delta;
}

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

        return delta;
}
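
/*
 * Illustration (assuming NICE_0_LOAD is the nice-0 weight, 1024 in
 * this era): a nice-0 entity passes delta through unchanged, while an
 * entity with twice that weight gets delta roughly halved, i.e. its
 * vruntime advances at half the wall-clock rate.
 */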

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}
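
/*
 * Worked example with the untuned defaults (l = 20ms, nl = 5, minimum
 * granularity 4ms): 3 runnable tasks keep the period at 20ms, while
 * 8 runnable tasks stretch it to 8 * 4ms = 32ms, so every task still
 * receives at least the minimum granularity.
 */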

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        unsigned long nr_running = cfs_rq->nr_running;

        if (unlikely(!se->on_rq))
                nr_running++;

        return calc_delta_weight(__sched_period(nr_running), se);
}
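
/*
 * Worked example: with two runnable nice-0 tasks the default 20ms
 * period splits evenly into 10ms each; if one task carries twice the
 * weight of the other, the split is roughly 13.3ms vs 6.7ms
 * (p * w/rw per the formula above).
 */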

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;

        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = calc_delta_fair(delta_exec, curr);
        curr->vruntime += delta_exec_weighted;
}

static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_of(cfs_rq)->clock;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));
        schedstat_set(se->wait_count, se->wait_count + 1);
        schedstat_set(se->wait_sum, se->wait_sum +
                        rq_of(cfs_rq)->clock - se->wait_start);
        schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
        cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_add(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
        if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, se->load.weight);
                list_add(&se->group_node, &cfs_rq->tasks);
        }
        cfs_rq->nr_running++;
        se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_sub(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
        if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, -se->load.weight);
                list_del_init(&se->group_node);
        }
        cfs_rq->nr_running--;
        se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
                struct task_struct *tsk = task_of(se);

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->sleep_max))
                        se->sleep_max = delta;

                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;

                account_scheduler_latency(tsk, delta >> 10, 1);
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;
                struct task_struct *tsk = task_of(se);

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->block_max))
                        se->block_max = delta;

                se->block_start = 0;
                se->sum_sleep_runtime += delta;

                /*
                 * Blocking time is in units of nanosecs, so shift by 20 to
                 * get a milliseconds-range estimation of the amount of
                 * time that the task spent sleeping:
                 */
                if (unlikely(prof_on == SLEEP_PROFILING)) {

                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
                                        delta >> 20);
                }
                account_scheduler_latency(tsk, delta >> 10, 0);
        }
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        s64 d = se->vruntime - cfs_rq->min_vruntime;

        if (d < 0)
                d = -d;

        if (d > 3*sysctl_sched_latency)
                schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
        u64 vruntime;

        if (first_fair(cfs_rq)) {
                vruntime = min_vruntime(cfs_rq->min_vruntime,
                                __pick_next_entity(cfs_rq)->vruntime);
        } else
                vruntime = cfs_rq->min_vruntime;

        /*
         * The 'current' period is already promised to the current tasks,
         * however the extra weight of the new task will slow them down a
         * little; place the new task so that it fits in the slot that
         * stays open at the end.
         */
        if (initial && sched_feat(START_DEBIT))
                vruntime += sched_vslice(cfs_rq, se);

        if (!initial) {
                /* sleeps up to a single latency don't count. */
                if (sched_feat(NEW_FAIR_SLEEPERS)) {
                        unsigned long thresh = sysctl_sched_latency;

                        /*
                         * convert the sleeper threshold into virtual time
                         */
                        if (sched_feat(NORMALIZED_SLEEPER))
                                thresh = calc_delta_fair(thresh, se);

                        vruntime -= thresh;
                }

                /* ensure we never gain time by being placed backwards. */
                vruntime = max_vruntime(se->vruntime, vruntime);
        }

        se->vruntime = vruntime;
}
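
/*
 * Net effect, roughly: a newly forked task (initial, with START_DEBIT)
 * starts one vruntime slice after the runqueue's min_vruntime, so it
 * cannot instantly preempt everything; a waking sleeper is credited up
 * to one latency period of vruntime, while max_vruntime() above makes
 * sure it never gains time relative to its own previous vruntime.
 */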

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
        account_entity_enqueue(cfs_rq, se);

        if (wakeup) {
                place_entity(cfs_rq, se, 0);
                enqueue_sleeper(cfs_rq, se);
        }

        update_stats_enqueue(cfs_rq, se);
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }

        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long ideal_runtime, delta_exec;

        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime)
                resched_task(rq_of(cfs_rq)->curr);
}
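
/*
 * In other words: once the running entity has consumed more CPU time
 * since it was last picked (prev_sum_exec_runtime) than the wall-time
 * slice sched_slice() grants it, the periodic tick requests a
 * reschedule.
 */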

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /* 'current' is not kept within the tree. */
        if (se->on_rq) {
                /*
                 * Any task has to be enqueued before it gets to execute on
                 * a CPU. So account for the time it spent waiting on the
                 * runqueue.
                 */
                update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
        }

        update_stats_curr_start(cfs_rq, se);
        cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. don't track it
         * when there are only lesser-weight tasks around):
         */
        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
                se->slice_max = max(se->slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
#endif
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *
pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rq *rq = rq_of(cfs_rq);
        u64 pair_slice = rq->clock - cfs_rq->pair_start;

        if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
                cfs_rq->pair_start = rq->clock;
                return se;
        }

        return cfs_rq->next;
}
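
/*
 * cfs_rq->next acts as a "next buddy" hint: when set it may be picked
 * ahead of the strictly leftmost entity, but only for up to one
 * sysctl_sched_min_granularity (tracked via pair_start) before
 * pick_next() falls back to the fair, leftmost choice.
 */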

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = NULL;

        if (first_fair(cfs_rq)) {
                se = __pick_next_entity(cfs_rq);
                se = pick_next(cfs_rq, se);
                set_next_entity(cfs_rq, se);
        }

        return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        check_spread(cfs_rq, prev);
        if (prev->on_rq) {
                update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
        }
        cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
        /*
         * queued ticks are scheduled to match the slice, so don't bother
         * validating it and just reschedule.
         */
        if (queued) {
                resched_task(rq_of(cfs_rq)->curr);
                return;
        }
        /*
         * don't let the period tick interfere with the hrtick preemption
         */
        if (!sched_feat(DOUBLE_TICK) &&
                        hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
                return;
#endif

        if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
                check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        WARN_ON(task_rq(p) != rq);

        if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
                u64 slice = sched_slice(cfs_rq, se);
                u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
                s64 delta = slice - ran;

                if (delta < 0) {
                        if (rq->curr == p)
                                resched_task(p);
                        return;
                }

                /*
                 * Don't schedule slices shorter than 10000ns; that just
                 * doesn't make sense. Rely on vruntime for fairness.
                 */
                if (rq->curr != p)
                        delta = max_t(s64, 10000LL, delta);

                hrtick_start(rq, delta);
        }
}
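
/*
 * So, with high-resolution ticks available, a timer is armed to fire
 * exactly when the remainder of the current slice runs out, giving
 * slice-accurate preemption instead of waiting for the next regular
 * tick.
 */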
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 852 |  | 
|  | 853 | /* | 
|  | 854 | * called from enqueue/dequeue and updates the hrtick when the | 
|  | 855 | * current task is from our class and nr_running is low enough | 
|  | 856 | * to matter. | 
|  | 857 | */ | 
|  | 858 | static void hrtick_update(struct rq *rq) | 
|  | 859 | { | 
|  | 860 | struct task_struct *curr = rq->curr; | 
|  | 861 |  | 
|  | 862 | if (curr->sched_class != &fair_sched_class) | 
|  | 863 | return; | 
|  | 864 |  | 
|  | 865 | if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) | 
|  | 866 | hrtick_start_fair(rq, curr); | 
|  | 867 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 868 | #else /* !CONFIG_SCHED_HRTICK */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 869 | static inline void | 
|  | 870 | hrtick_start_fair(struct rq *rq, struct task_struct *p) | 
|  | 871 | { | 
|  | 872 | } | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 873 |  | 
|  | 874 | static inline void hrtick_update(struct rq *rq) | 
|  | 875 | { | 
|  | 876 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 877 | #endif | 
|  | 878 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 879 | /* | 
|  | 880 | * The enqueue_task method is called before nr_running is | 
|  | 881 | * increased. Here we update the fair scheduling stats and | 
|  | 882 | * then put the task into the rbtree: | 
|  | 883 | */ | 
| Ingo Molnar | fd390f6 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 884 | static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 885 | { | 
|  | 886 | struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 887 | struct sched_entity *se = &p->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 888 |  | 
|  | 889 | for_each_sched_entity(se) { | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 890 | if (se->on_rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 891 | break; | 
|  | 892 | cfs_rq = cfs_rq_of(se); | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 893 | enqueue_entity(cfs_rq, se, wakeup); | 
| Srivatsa Vaddagiri | b9fa3df | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 894 | wakeup = 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 895 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 896 |  | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 897 | hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 898 | } | 
|  | 899 |  | 
|  | 900 | /* | 
|  | 901 | * The dequeue_task method is called before nr_running is | 
|  | 902 | * decreased. We remove the task from the rbtree and | 
|  | 903 | * update the fair scheduling stats: | 
|  | 904 | */ | 
| Ingo Molnar | f02231e | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 905 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 906 | { | 
|  | 907 | struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 908 | struct sched_entity *se = &p->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 909 |  | 
|  | 910 | for_each_sched_entity(se) { | 
|  | 911 | cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | 525c271 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 912 | dequeue_entity(cfs_rq, se, sleep); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 913 | /* Don't dequeue parent if it has other entities besides us */ | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 914 | if (cfs_rq->load.weight) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 915 | break; | 
| Srivatsa Vaddagiri | b9fa3df | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 916 | sleep = 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 917 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 918 |  | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 919 | hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 920 | } | 
|  | 921 |  | 
|  | 922 | /* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 923 | * sched_yield() support is very simple - we dequeue and enqueue. | 
|  | 924 | * | 
|  | 925 | * If compat_yield is turned on then we requeue to the end of the tree. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 926 | */ | 
| Dmitry Adamushko | 4530d7a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 927 | static void yield_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 928 | { | 
| Ingo Molnar | db292ca | 2007-12-04 17:04:39 +0100 | [diff] [blame] | 929 | struct task_struct *curr = rq->curr; | 
|  | 930 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
|  | 931 | struct sched_entity *rightmost, *se = &curr->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 932 |  | 
|  | 933 | /* | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 934 | * Are we the only task in the tree? | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 935 | */ | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 936 | if (unlikely(cfs_rq->nr_running == 1)) | 
|  | 937 | return; | 
|  | 938 |  | 
| Ingo Molnar | db292ca | 2007-12-04 17:04:39 +0100 | [diff] [blame] | 939 | if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) { | 
| Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 940 | update_rq_clock(rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 941 | /* | 
| Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 942 | * Update run-time statistics of the 'current'. | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 943 | */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 944 | update_curr(cfs_rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 945 |  | 
|  | 946 | return; | 
|  | 947 | } | 
|  | 948 | /* | 
|  | 949 | * Find the rightmost entry in the rbtree: | 
|  | 950 | */ | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 951 | rightmost = __pick_last_entity(cfs_rq); | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 952 | /* | 
|  | 953 | * Already in the rightmost position? | 
|  | 954 | */ | 
| Peter Zijlstra | 79b3fef | 2008-02-18 13:39:37 +0100 | [diff] [blame] | 955 | if (unlikely(!rightmost || rightmost->vruntime < se->vruntime)) | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 956 | return; | 
|  | 957 |  | 
|  | 958 | /* | 
|  | 959 | * Minimally necessary key value to be last in the tree: | 
| Dmitry Adamushko | 2b1e315 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 960 | * Upon rescheduling, sched_class::put_prev_task() will place | 
|  | 961 | * 'current' within the tree based on its new key value. | 
| Ingo Molnar | 1799e35 | 2007-09-19 23:34:46 +0200 | [diff] [blame] | 962 | */ | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 963 | se->vruntime = rightmost->vruntime + 1; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 964 | } | 
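|  |  | /* | 
|  |  |  * For example (illustrative numbers): with compat_yield set, if the current | 
|  |  |  * rightmost entity has a vruntime of, say, 300000000, the yielding task's | 
|  |  |  * vruntime becomes 300000001, so put_prev_task() re-inserts it as the new | 
|  |  |  * rightmost node and all currently queued tasks are picked before it. | 
|  |  |  */ | 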
|  | 965 |  | 
|  | 966 | /* | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 967 | * wake_idle() will wake a task on an idle cpu if task->cpu is | 
|  | 968 | * not idle and an idle cpu is available.  The span of cpus to | 
|  | 969 | * search starts with the closest cpus and widens outward as needed, | 
|  | 970 | * so we always favor a closer, idle cpu. | 
| Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 971 | * Domains may include CPUs that are not usable for migration, | 
|  | 972 | * hence we need to mask them out (cpu_active_map) | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 973 | * | 
|  | 974 | * Returns the CPU we should wake onto. | 
|  | 975 | */ | 
|  | 976 | #if defined(ARCH_HAS_SCHED_WAKE_IDLE) | 
|  | 977 | static int wake_idle(int cpu, struct task_struct *p) | 
|  | 978 | { | 
|  | 979 | cpumask_t tmp; | 
|  | 980 | struct sched_domain *sd; | 
|  | 981 | int i; | 
|  | 982 |  | 
|  | 983 | /* | 
|  | 984 | * If it is idle, then it is the best cpu to run this task. | 
|  | 985 | * | 
|  | 986 | * This cpu is also the best if it already has more than one task. | 
|  | 987 | * Its siblings must also be busy (in most cases), as they have not | 
|  | 988 | * picked up the extra load from this cpu, so we need not check the | 
|  | 989 | * sibling runqueue info. This avoids the checks and the cache-miss | 
|  | 990 | * penalties associated with doing so. | 
|  | 991 | */ | 
| Gregory Haskins | 104f645 | 2008-04-28 12:40:01 -0400 | [diff] [blame] | 992 | if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1) | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 993 | return cpu; | 
|  | 994 |  | 
|  | 995 | for_each_domain(cpu, sd) { | 
| Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 996 | if ((sd->flags & SD_WAKE_IDLE) | 
|  | 997 | || ((sd->flags & SD_WAKE_IDLE_FAR) | 
|  | 998 | && !task_hot(p, task_rq(p)->clock, sd))) { | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 999 | cpus_and(tmp, sd->span, p->cpus_allowed); | 
| Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 1000 | cpus_and(tmp, tmp, cpu_active_map); | 
| Mike Travis | 363ab6f | 2008-05-12 21:21:13 +0200 | [diff] [blame] | 1001 | for_each_cpu_mask_nr(i, tmp) { | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1002 | if (idle_cpu(i)) { | 
|  | 1003 | if (i != task_cpu(p)) { | 
|  | 1004 | schedstat_inc(p, | 
|  | 1005 | se.nr_wakeups_idle); | 
|  | 1006 | } | 
|  | 1007 | return i; | 
|  | 1008 | } | 
|  | 1009 | } | 
|  | 1010 | } else { | 
|  | 1011 | break; | 
|  | 1012 | } | 
|  | 1013 | } | 
|  | 1014 | return cpu; | 
|  | 1015 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 1016 | #else /* !ARCH_HAS_SCHED_WAKE_IDLE */ | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1017 | static inline int wake_idle(int cpu, struct task_struct *p) | 
|  | 1018 | { | 
|  | 1019 | return cpu; | 
|  | 1020 | } | 
|  | 1021 | #endif | 
|  | 1022 |  | 
|  | 1023 | #ifdef CONFIG_SMP | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1024 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1025 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 1026 | /* | 
|  | 1027 | * effective_load() calculates the load change as seen from the root_task_group | 
|  | 1028 | * | 
|  | 1029 | * Adding load to a group doesn't make a group heavier, but can cause movement | 
|  | 1030 | * of group shares between cpus. Assuming the shares were perfectly aligned, | 
|  | 1031 | * one can calculate the shift in shares. | 
|  | 1032 | * | 
|  | 1033 | * The problem is that perfectly aligning the shares is rather expensive, hence | 
|  | 1034 | * we try to avoid doing that too often - see update_shares(), which ratelimits | 
|  | 1035 | * this change. | 
|  | 1036 | * | 
|  | 1037 | * We compensate for this by not only taking the current delta into account, | 
|  | 1038 | * but also considering the delta between when the shares were last adjusted | 
|  | 1039 | * and now. | 
|  | 1040 | * | 
|  | 1041 | * We still saw a performance dip; tracing showed us that when balancing | 
|  | 1042 | * between cgroup:/ and cgroup:/foo the number of affine wakeups increased | 
|  | 1043 | * significantly. Therefore try to bias the error in the direction of | 
|  | 1044 | * failing the affine wakeup. | 
|  | 1045 | * | 
|  | 1046 | */ | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1047 | static long effective_load(struct task_group *tg, int cpu, | 
|  | 1048 | long wl, long wg) | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1049 | { | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1050 | struct sched_entity *se = tg->se[cpu]; | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1051 |  | 
|  | 1052 | if (!tg->parent) | 
|  | 1053 | return wl; | 
|  | 1054 |  | 
|  | 1055 | /* | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 1056 | * By not taking the decrease of shares on the other cpu into | 
|  | 1057 | * account our error leans towards reducing the affine wakeups. | 
|  | 1058 | */ | 
|  | 1059 | if (!wl && sched_feat(ASYM_EFF_LOAD)) | 
|  | 1060 | return wl; | 
|  | 1061 |  | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1062 | for_each_sched_entity(se) { | 
| Peter Zijlstra | cb5ef42 | 2008-06-27 13:41:32 +0200 | [diff] [blame] | 1063 | long S, rw, s, a, b; | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 1064 | long more_w; | 
|  | 1065 |  | 
|  | 1066 | /* | 
|  | 1067 | * On top of the current increment, also add the weight change | 
|  | 1068 | * accumulated since the shares were last updated. | 
|  | 1069 | */ | 
|  | 1070 | more_w = se->my_q->load.weight - se->my_q->rq_weight; | 
|  | 1071 | wl += more_w; | 
|  | 1072 | wg += more_w; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1073 |  | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1074 | S = se->my_q->tg->shares; | 
|  | 1075 | s = se->my_q->shares; | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1076 | rw = se->my_q->rq_weight; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1077 |  | 
| Peter Zijlstra | cb5ef42 | 2008-06-27 13:41:32 +0200 | [diff] [blame] | 1078 | a = S*(rw + wl); | 
|  | 1079 | b = S*rw + s*wg; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1080 |  | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 1081 | wl = s*(a-b); | 
|  | 1082 |  | 
|  | 1083 | if (likely(b)) | 
|  | 1084 | wl /= b; | 
|  | 1085 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1086 | /* | 
|  | 1087 | * Assume the group is already running and will | 
|  | 1088 | * thus already be accounted for in the weight. | 
|  | 1089 | * | 
|  | 1090 | * That is, moving shares between CPUs does not | 
|  | 1091 | * alter the group weight. | 
|  | 1092 | */ | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1093 | wg = 0; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1094 | } | 
|  | 1095 |  | 
|  | 1096 | return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1097 | } | 
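|  |  | /* | 
|  |  |  * Worked example with illustrative values (not taken from real hardware): | 
|  |  |  * assume S = tg->shares = 1024, s = my_q->shares = 512, rw = rq_weight = 2048 | 
|  |  |  * and a newly woken nice-0 task, so wl = wg = 1024 at the bottom level. | 
|  |  |  * Then a = 1024*(2048+1024) = 3145728, b = 1024*2048 + 512*1024 = 2621440, | 
|  |  |  * and wl = 512*(a-b)/b ~= 102: the 1024 of task weight is seen one level up | 
|  |  |  * as only ~102 of share shift, which is what the next level iterates on. | 
|  |  |  */ | 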
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1098 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1099 | #else | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1100 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1101 | static inline unsigned long effective_load(struct task_group *tg, int cpu, | 
|  | 1102 | unsigned long wl, unsigned long wg) | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1103 | { | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1104 | return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1105 | } | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1106 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1107 | #endif | 
|  | 1108 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1109 | static int | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1110 | wake_affine(struct sched_domain *this_sd, struct rq *this_rq, | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1111 | struct task_struct *p, int prev_cpu, int this_cpu, int sync, | 
|  | 1112 | int idx, unsigned long load, unsigned long this_load, | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1113 | unsigned int imbalance) | 
|  | 1114 | { | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1115 | struct task_struct *curr = this_rq->curr; | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1116 | struct task_group *tg; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1117 | unsigned long tl = this_load; | 
|  | 1118 | unsigned long tl_per_task; | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1119 | unsigned long weight; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1120 | int balanced; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1121 |  | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1122 | if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1123 | return 0; | 
|  | 1124 |  | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 1125 | if (!sync && sched_feat(SYNC_WAKEUPS) && | 
|  | 1126 | curr->se.avg_overlap < sysctl_sched_migration_cost && | 
|  | 1127 | p->se.avg_overlap < sysctl_sched_migration_cost) | 
|  | 1128 | sync = 1; | 
|  | 1129 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1130 | /* | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1131 | * If sync wakeup then subtract the (maximum possible) | 
|  | 1132 | * effect of the currently running task from the load | 
|  | 1133 | * of the current CPU: | 
|  | 1134 | */ | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1135 | if (sync) { | 
|  | 1136 | tg = task_group(current); | 
|  | 1137 | weight = current->se.load.weight; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1138 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1139 | tl += effective_load(tg, this_cpu, -weight, -weight); | 
|  | 1140 | load += effective_load(tg, prev_cpu, 0, -weight); | 
|  | 1141 | } | 
|  | 1142 |  | 
|  | 1143 | tg = task_group(p); | 
|  | 1144 | weight = p->se.load.weight; | 
|  | 1145 |  | 
|  | 1146 | balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <= | 
|  | 1147 | imbalance*(load + effective_load(tg, prev_cpu, 0, weight)); | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1148 |  | 
|  | 1149 | /* | 
|  | 1150 | * If the currently running task will sleep within | 
|  | 1151 | * a reasonable amount of time then attract this newly | 
|  | 1152 | * woken task: | 
|  | 1153 | */ | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 1154 | if (sync && balanced) | 
|  | 1155 | return 1; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1156 |  | 
|  | 1157 | schedstat_inc(p, se.nr_wakeups_affine_attempts); | 
|  | 1158 | tl_per_task = cpu_avg_load_per_task(this_cpu); | 
|  | 1159 |  | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1160 | if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <= | 
|  | 1161 | tl_per_task)) { | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1162 | /* | 
|  | 1163 | * This domain has SD_WAKE_AFFINE and | 
|  | 1164 | * p is cache cold in this domain, and | 
|  | 1165 | * there is no bad imbalance. | 
|  | 1166 | */ | 
|  | 1167 | schedstat_inc(this_sd, ttwu_move_affine); | 
|  | 1168 | schedstat_inc(p, se.nr_wakeups_affine); | 
|  | 1169 |  | 
|  | 1170 | return 1; | 
|  | 1171 | } | 
|  | 1172 | return 0; | 
|  | 1173 | } | 
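|  |  | /* | 
|  |  |  * Illustration of the 'balanced' test with assumed numbers: without group | 
|  |  |  * scheduling, effective_load() is the identity, so the test reduces to | 
|  |  |  * 100*(tl + p->se.load.weight) <= imbalance*load. Assuming imbalance = 112 | 
|  |  |  * (computed below for an imbalance_pct of 125), tl = 1024, load = 2048 and | 
|  |  |  * a nice-0 wakee (weight 1024): 100*2048 = 204800 <= 112*2048 = 229376, so | 
|  |  |  * the wakeup counts as balanced and may be pulled to this_cpu. | 
|  |  |  */ | 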
|  | 1174 |  | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1175 | static int select_task_rq_fair(struct task_struct *p, int sync) | 
|  | 1176 | { | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1177 | struct sched_domain *sd, *this_sd = NULL; | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1178 | int prev_cpu, this_cpu, new_cpu; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1179 | unsigned long load, this_load; | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1180 | struct rq *this_rq; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1181 | unsigned int imbalance; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1182 | int idx; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1183 |  | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1184 | prev_cpu	= task_cpu(p); | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1185 | this_cpu	= smp_processor_id(); | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1186 | this_rq		= cpu_rq(this_cpu); | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1187 | new_cpu		= prev_cpu; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1188 |  | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1189 | if (prev_cpu == this_cpu) | 
|  | 1190 | goto out; | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1191 | /* | 
|  | 1192 | * 'this_sd' is the first domain that both | 
|  | 1193 | * this_cpu and prev_cpu are present in: | 
|  | 1194 | */ | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1195 | for_each_domain(this_cpu, sd) { | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1196 | if (cpu_isset(prev_cpu, sd->span)) { | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1197 | this_sd = sd; | 
|  | 1198 | break; | 
|  | 1199 | } | 
|  | 1200 | } | 
|  | 1201 |  | 
|  | 1202 | if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) | 
| Ingo Molnar | f482738 | 2008-03-16 21:21:47 +0100 | [diff] [blame] | 1203 | goto out; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1204 |  | 
|  | 1205 | /* | 
|  | 1206 | * Check for affine wakeup and passive balancing possibilities. | 
|  | 1207 | */ | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1208 | if (!this_sd) | 
| Ingo Molnar | f482738 | 2008-03-16 21:21:47 +0100 | [diff] [blame] | 1209 | goto out; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1210 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1211 | idx = this_sd->wake_idx; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1212 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1213 | imbalance = 100 + (this_sd->imbalance_pct - 100) / 2; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1214 |  | 
| Ingo Molnar | ac192d3 | 2008-03-16 20:56:26 +0100 | [diff] [blame] | 1215 | load = source_load(prev_cpu, idx); | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1216 | this_load = target_load(this_cpu, idx); | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1217 |  | 
| Amit K. Arora | 64b9e02 | 2008-09-30 17:15:39 +0530 | [diff] [blame] | 1218 | if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx, | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1219 | load, this_load, imbalance)) | 
|  | 1220 | return this_cpu; | 
|  | 1221 |  | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1222 | /* | 
|  | 1223 | * Start passive balancing when half the imbalance_pct | 
|  | 1224 | * limit is reached. | 
|  | 1225 | */ | 
|  | 1226 | if (this_sd->flags & SD_WAKE_BALANCE) { | 
|  | 1227 | if (imbalance*this_load <= 100*load) { | 
|  | 1228 | schedstat_inc(this_sd, ttwu_move_balance); | 
|  | 1229 | schedstat_inc(p, se.nr_wakeups_passive); | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1230 | return this_cpu; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1231 | } | 
|  | 1232 | } | 
|  | 1233 |  | 
| Ingo Molnar | f482738 | 2008-03-16 21:21:47 +0100 | [diff] [blame] | 1234 | out: | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1235 | return wake_idle(new_cpu, p); | 
|  | 1236 | } | 
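|  |  | /* | 
|  |  |  * Example of the passive-balance cutoff with assumed numbers: with an | 
|  |  |  * imbalance_pct of 125 the imbalance factor above is 112, so the wakeup is | 
|  |  |  * pulled to this_cpu once this_load <= ~89% of prev_cpu's load; e.g. | 
|  |  |  * load = 4096, this_load = 3500 gives 112*3500 = 392000 <= 100*4096 = 409600. | 
|  |  |  */ | 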
|  | 1237 | #endif /* CONFIG_SMP */ | 
|  | 1238 |  | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1239 | static unsigned long wakeup_gran(struct sched_entity *se) | 
|  | 1240 | { | 
|  | 1241 | unsigned long gran = sysctl_sched_wakeup_granularity; | 
|  | 1242 |  | 
|  | 1243 | /* | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 1244 | * Preempt more easily on behalf of -nice (heavier) tasks, while not | 
|  | 1245 | * making it harder for +nice tasks. | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1246 | */ | 
| Peter Zijlstra | c9c294a | 2008-06-27 13:41:12 +0200 | [diff] [blame] | 1247 | if (sched_feat(ASYM_GRAN)) | 
| Peter Zijlstra | 6956985 | 2008-09-23 14:54:23 +0200 | [diff] [blame] | 1248 | gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load); | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1249 |  | 
|  | 1250 | return gran; | 
|  | 1251 | } | 
|  | 1252 |  | 
|  | 1253 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1254 | * Preempt the current task with a newly woken task if needed: | 
|  | 1255 | */ | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1256 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1257 | { | 
|  | 1258 | struct task_struct *curr = rq->curr; | 
| Srivatsa Vaddagiri | fad095a | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1259 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
| Srivatsa Vaddagiri | 8651a86 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1260 | struct sched_entity *se = &curr->se, *pse = &p->se; | 
| Peter Zijlstra | 6956985 | 2008-09-23 14:54:23 +0200 | [diff] [blame] | 1261 | s64 delta_exec; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1262 |  | 
|  | 1263 | if (unlikely(rt_prio(p->prio))) { | 
| Ingo Molnar | a8e504d | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1264 | update_rq_clock(rq); | 
| Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1265 | update_curr(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1266 | resched_task(curr); | 
|  | 1267 | return; | 
|  | 1268 | } | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1269 |  | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1270 | if (unlikely(se == pse)) | 
|  | 1271 | return; | 
|  | 1272 |  | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1273 | cfs_rq_of(pse)->next = pse; | 
|  | 1274 |  | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1275 | /* | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 1276 | * We can come here with TIF_NEED_RESCHED already set from the new-task | 
|  | 1277 | * wakeup path. | 
|  | 1278 | */ | 
|  | 1279 | if (test_tsk_need_resched(curr)) | 
|  | 1280 | return; | 
|  | 1281 |  | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1282 | /* | 
|  | 1283 | * Batch tasks do not preempt (their preemption is driven by | 
|  | 1284 | * the tick): | 
|  | 1285 | */ | 
|  | 1286 | if (unlikely(p->policy == SCHED_BATCH)) | 
|  | 1287 | return; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1288 |  | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1289 | if (!sched_feat(WAKEUP_PREEMPT)) | 
|  | 1290 | return; | 
| Peter Zijlstra | ce6c131 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1291 |  | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 1292 | if (sched_feat(WAKEUP_OVERLAP) && (sync || | 
|  | 1293 | (se->avg_overlap < sysctl_sched_migration_cost && | 
|  | 1294 | pse->avg_overlap < sysctl_sched_migration_cost))) { | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1295 | resched_task(curr); | 
|  | 1296 | return; | 
| Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 1297 | } | 
|  | 1298 |  | 
| Peter Zijlstra | 6956985 | 2008-09-23 14:54:23 +0200 | [diff] [blame] | 1299 | delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime; | 
|  | 1300 | if (delta_exec > wakeup_gran(pse)) | 
| Ingo Molnar | 77d9cc4 | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1301 | resched_task(curr); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1302 | } | 
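|  |  | /* | 
|  |  |  * Illustration of the final preemption test, assuming a 5 ms wakeup | 
|  |  |  * granularity: with ASYM_GRAN, a nice -5 wakee (weight 3121 vs NICE_0_LOAD | 
|  |  |  * 1024) shrinks the granularity to roughly 5ms * 1024/3121 ~= 1.6 ms, so | 
|  |  |  * curr is rescheduled once it has run more than ~1.6 ms since it was last | 
|  |  |  * picked (delta_exec), while a nice-0 wakee would have to wait the full 5 ms. | 
|  |  |  */ | 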
|  | 1303 |  | 
| Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1304 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1305 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1306 | struct task_struct *p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1307 | struct cfs_rq *cfs_rq = &rq->cfs; | 
|  | 1308 | struct sched_entity *se; | 
|  | 1309 |  | 
|  | 1310 | if (unlikely(!cfs_rq->nr_running)) | 
|  | 1311 | return NULL; | 
|  | 1312 |  | 
|  | 1313 | do { | 
| Ingo Molnar | 9948f4b | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1314 | se = pick_next_entity(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1315 | cfs_rq = group_cfs_rq(se); | 
|  | 1316 | } while (cfs_rq); | 
|  | 1317 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1318 | p = task_of(se); | 
|  | 1319 | hrtick_start_fair(rq, p); | 
|  | 1320 |  | 
|  | 1321 | return p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1322 | } | 
|  | 1323 |  | 
|  | 1324 | /* | 
|  | 1325 | * Account for a descheduled task: | 
|  | 1326 | */ | 
| Ingo Molnar | 31ee529 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1327 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1328 | { | 
|  | 1329 | struct sched_entity *se = &prev->se; | 
|  | 1330 | struct cfs_rq *cfs_rq; | 
|  | 1331 |  | 
|  | 1332 | for_each_sched_entity(se) { | 
|  | 1333 | cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1334 | put_prev_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1335 | } | 
|  | 1336 | } | 
|  | 1337 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1338 | #ifdef CONFIG_SMP | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1339 | /************************************************** | 
|  | 1340 | * Fair scheduling class load-balancing methods: | 
|  | 1341 | */ | 
|  | 1342 |  | 
|  | 1343 | /* | 
|  | 1344 | * Load-balancing iterator. Note: while the runqueue stays locked | 
|  | 1345 | * during the whole iteration, the current task might be | 
|  | 1346 | * dequeued so the iterator has to be dequeue-safe. Here we | 
|  | 1347 | * achieve that by always pre-iterating before returning | 
|  | 1348 | * the current task: | 
|  | 1349 | */ | 
| Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1350 | static struct task_struct * | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1351 | __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1352 | { | 
| Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 1353 | struct task_struct *p = NULL; | 
|  | 1354 | struct sched_entity *se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1355 |  | 
| Mike Galbraith | 77ae651 | 2008-08-11 13:32:02 +0200 | [diff] [blame] | 1356 | if (next == &cfs_rq->tasks) | 
|  | 1357 | return NULL; | 
|  | 1358 |  | 
| Bharata B Rao | b87f172 | 2008-09-25 09:53:54 +0530 | [diff] [blame] | 1359 | se = list_entry(next, struct sched_entity, group_node); | 
|  | 1360 | p = task_of(se); | 
|  | 1361 | cfs_rq->balance_iterator = next->next; | 
| Mike Galbraith | 77ae651 | 2008-08-11 13:32:02 +0200 | [diff] [blame] | 1362 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1363 | return p; | 
|  | 1364 | } | 
|  | 1365 |  | 
|  | 1366 | static struct task_struct *load_balance_start_fair(void *arg) | 
|  | 1367 | { | 
|  | 1368 | struct cfs_rq *cfs_rq = arg; | 
|  | 1369 |  | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1370 | return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1371 | } | 
|  | 1372 |  | 
|  | 1373 | static struct task_struct *load_balance_next_fair(void *arg) | 
|  | 1374 | { | 
|  | 1375 | struct cfs_rq *cfs_rq = arg; | 
|  | 1376 |  | 
| Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1377 | return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1378 | } | 
|  | 1379 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1380 | static unsigned long | 
|  | 1381 | __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 1382 | unsigned long max_load_move, struct sched_domain *sd, | 
|  | 1383 | enum cpu_idle_type idle, int *all_pinned, int *this_best_prio, | 
|  | 1384 | struct cfs_rq *cfs_rq) | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1385 | { | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1386 | struct rq_iterator cfs_rq_iterator; | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1387 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1388 | cfs_rq_iterator.start = load_balance_start_fair; | 
|  | 1389 | cfs_rq_iterator.next = load_balance_next_fair; | 
|  | 1390 | cfs_rq_iterator.arg = cfs_rq; | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1391 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1392 | return balance_tasks(this_rq, this_cpu, busiest, | 
|  | 1393 | max_load_move, sd, idle, all_pinned, | 
|  | 1394 | this_best_prio, &cfs_rq_iterator); | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1395 | } | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1396 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1397 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1398 | static unsigned long | 
|  | 1399 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 1400 | unsigned long max_load_move, | 
|  | 1401 | struct sched_domain *sd, enum cpu_idle_type idle, | 
|  | 1402 | int *all_pinned, int *this_best_prio) | 
|  | 1403 | { | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1404 | long rem_load_move = max_load_move; | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1405 | int busiest_cpu = cpu_of(busiest); | 
|  | 1406 | struct task_group *tg; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1407 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1408 | rcu_read_lock(); | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1409 | update_h_load(busiest_cpu); | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1410 |  | 
| Chris Friesen | caea8a0 | 2008-09-22 11:06:09 -0600 | [diff] [blame] | 1411 | list_for_each_entry_rcu(tg, &task_groups, list) { | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1412 | struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu]; | 
| Peter Zijlstra | 42a3ac7 | 2008-06-27 13:41:29 +0200 | [diff] [blame] | 1413 | unsigned long busiest_h_load = busiest_cfs_rq->h_load; | 
|  | 1414 | unsigned long busiest_weight = busiest_cfs_rq->load.weight; | 
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1415 | u64 rem_load, moved_load; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1416 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1417 | /* | 
|  | 1418 | * empty group | 
|  | 1419 | */ | 
| Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1420 | if (!busiest_cfs_rq->task_weight) | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1421 | continue; | 
|  | 1422 |  | 
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1423 | rem_load = (u64)rem_load_move * busiest_weight; | 
|  | 1424 | rem_load = div_u64(rem_load, busiest_h_load + 1); | 
| Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1425 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1426 | moved_load = __load_balance_fair(this_rq, this_cpu, busiest, | 
| Srivatsa Vaddagiri | 53fecd8 | 2008-06-27 13:41:20 +0200 | [diff] [blame] | 1427 | rem_load, sd, idle, all_pinned, this_best_prio, | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1428 | tg->cfs_rq[busiest_cpu]); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1429 |  | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1430 | if (!moved_load) | 
|  | 1431 | continue; | 
|  | 1432 |  | 
| Peter Zijlstra | 42a3ac7 | 2008-06-27 13:41:29 +0200 | [diff] [blame] | 1433 | moved_load *= busiest_h_load; | 
| Srivatsa Vaddagiri | 243e0e7 | 2008-06-27 13:41:36 +0200 | [diff] [blame] | 1434 | moved_load = div_u64(moved_load, busiest_weight + 1); | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1435 |  | 
|  | 1436 | rem_load_move -= moved_load; | 
|  | 1437 | if (rem_load_move < 0) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1438 | break; | 
|  | 1439 | } | 
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1440 | rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1441 |  | 
| Peter Williams | 4301065 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 1442 | return max_load_move - rem_load_move; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1443 | } | 
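|  |  | /* | 
|  |  |  * Example of the h_load scaling above, with made-up numbers: if a group | 
|  |  |  * cfs_rq has load.weight = 2048 but h_load = 512 (its weight is diluted 4:1 | 
|  |  |  * by the parent's shares), a request to move 1024 of hierarchical load is | 
|  |  |  * turned into a local target of 1024*2048/513 ~= 4088; if balance_tasks() | 
|  |  |  * then moves 2048 of local weight, that is reported back as | 
|  |  |  * 2048*512/2049 ~= 511 of hierarchical load and subtracted from the total. | 
|  |  |  */ | 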
| Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1444 | #else | 
|  | 1445 | static unsigned long | 
|  | 1446 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 1447 | unsigned long max_load_move, | 
|  | 1448 | struct sched_domain *sd, enum cpu_idle_type idle, | 
|  | 1449 | int *all_pinned, int *this_best_prio) | 
|  | 1450 | { | 
|  | 1451 | return __load_balance_fair(this_rq, this_cpu, busiest, | 
|  | 1452 | max_load_move, sd, idle, all_pinned, | 
|  | 1453 | this_best_prio, &busiest->cfs); | 
|  | 1454 | } | 
|  | 1455 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1456 |  | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1457 | static int | 
|  | 1458 | move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 1459 | struct sched_domain *sd, enum cpu_idle_type idle) | 
|  | 1460 | { | 
|  | 1461 | struct cfs_rq *busy_cfs_rq; | 
|  | 1462 | struct rq_iterator cfs_rq_iterator; | 
|  | 1463 |  | 
|  | 1464 | cfs_rq_iterator.start = load_balance_start_fair; | 
|  | 1465 | cfs_rq_iterator.next = load_balance_next_fair; | 
|  | 1466 |  | 
|  | 1467 | for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { | 
|  | 1468 | /* | 
|  | 1469 | * pass busy_cfs_rq argument into | 
|  | 1470 | * load_balance_[start|next]_fair iterators | 
|  | 1471 | */ | 
|  | 1472 | cfs_rq_iterator.arg = busy_cfs_rq; | 
|  | 1473 | if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, | 
|  | 1474 | &cfs_rq_iterator)) | 
|  | 1475 | return 1; | 
|  | 1476 | } | 
|  | 1477 |  | 
|  | 1478 | return 0; | 
|  | 1479 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 1480 | #endif /* CONFIG_SMP */ | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1481 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1482 | /* | 
|  | 1483 | * scheduler tick hitting a task of our scheduling class: | 
|  | 1484 | */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1485 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1486 | { | 
|  | 1487 | struct cfs_rq *cfs_rq; | 
|  | 1488 | struct sched_entity *se = &curr->se; | 
|  | 1489 |  | 
|  | 1490 | for_each_sched_entity(se) { | 
|  | 1491 | cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1492 | entity_tick(cfs_rq, se, queued); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1493 | } | 
|  | 1494 | } | 
|  | 1495 |  | 
| Ingo Molnar | 8eb172d | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 1496 | #define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0) | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1497 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1498 | /* | 
|  | 1499 | * Share the fairness runtime between parent and child, so that the | 
|  | 1500 | * total amount of CPU pressure stays equal - new tasks | 
|  | 1501 | * get a chance to run, but frequent forkers are not allowed to | 
|  | 1502 | * monopolize the CPU. Note: the parent runqueue is locked, | 
|  | 1503 | * the child is not running yet. | 
|  | 1504 | */ | 
| Ingo Molnar | ee0827d | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1505 | static void task_new_fair(struct rq *rq, struct task_struct *p) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1506 | { | 
|  | 1507 | struct cfs_rq *cfs_rq = task_cfs_rq(p); | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1508 | struct sched_entity *se = &p->se, *curr = cfs_rq->curr; | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1509 | int this_cpu = smp_processor_id(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1510 |  | 
|  | 1511 | sched_info_queued(p); | 
|  | 1512 |  | 
| Ting Yang | 7109c44 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 1513 | update_curr(cfs_rq); | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1514 | place_entity(cfs_rq, se, 1); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1515 |  | 
| Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1516 | /* 'curr' will be NULL if the child belongs to a different group */ | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1517 | if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && | 
| Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1518 | curr && curr->vruntime < se->vruntime) { | 
| Dmitry Adamushko | 87fefa3 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1519 | /* | 
| Ingo Molnar | edcb60a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1520 | * Upon rescheduling, sched_class::put_prev_task() will place | 
|  | 1521 | * 'current' within the tree based on its new key value. | 
|  | 1522 | */ | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1523 | swap(curr->vruntime, se->vruntime); | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 1524 | resched_task(rq->curr); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1525 | } | 
|  | 1526 |  | 
| Srivatsa Vaddagiri | b9dca1e | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 1527 | enqueue_task_fair(rq, p, 0); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1528 | } | 
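|  |  | /* | 
|  |  |  * For example (illustrative values): if the parent's vruntime is 100 ms and | 
|  |  |  * place_entity() gave the child 110 ms, the swap above leaves the child at | 
|  |  |  * 100 ms and the parent at 110 ms, making the child the leftmost entity so | 
|  |  |  * it runs before the parent, as sched_child_runs_first requests. | 
|  |  |  */ | 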
|  | 1529 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1530 | /* | 
|  | 1531 | * Priority of the task has changed. Check to see if we preempt | 
|  | 1532 | * the current task. | 
|  | 1533 | */ | 
|  | 1534 | static void prio_changed_fair(struct rq *rq, struct task_struct *p, | 
|  | 1535 | int oldprio, int running) | 
|  | 1536 | { | 
|  | 1537 | /* | 
|  | 1538 | * Reschedule if we are currently running on this runqueue and | 
|  | 1539 | * our priority decreased, or if we are not currently running on | 
|  | 1540 | * this runqueue and our priority is higher than the current's | 
|  | 1541 | */ | 
|  | 1542 | if (running) { | 
|  | 1543 | if (p->prio > oldprio) | 
|  | 1544 | resched_task(rq->curr); | 
|  | 1545 | } else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1546 | check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1547 | } | 
|  | 1548 |  | 
|  | 1549 | /* | 
|  | 1550 | * We switched to the sched_fair class. | 
|  | 1551 | */ | 
|  | 1552 | static void switched_to_fair(struct rq *rq, struct task_struct *p, | 
|  | 1553 | int running) | 
|  | 1554 | { | 
|  | 1555 | /* | 
|  | 1556 | * We were most likely switched from sched_rt, so | 
|  | 1557 | * kick off the schedule if running, otherwise just see | 
|  | 1558 | * if we can still preempt the current task. | 
|  | 1559 | */ | 
|  | 1560 | if (running) | 
|  | 1561 | resched_task(rq->curr); | 
|  | 1562 | else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 1563 | check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1564 | } | 
|  | 1565 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1566 | /* Account for a task changing its policy or group. | 
|  | 1567 | * | 
|  | 1568 | * This routine is mostly called to set cfs_rq->curr field when a task | 
|  | 1569 | * migrates between groups/classes. | 
|  | 1570 | */ | 
|  | 1571 | static void set_curr_task_fair(struct rq *rq) | 
|  | 1572 | { | 
|  | 1573 | struct sched_entity *se = &rq->curr->se; | 
|  | 1574 |  | 
|  | 1575 | for_each_sched_entity(se) | 
|  | 1576 | set_next_entity(cfs_rq_of(se), se); | 
|  | 1577 | } | 
|  | 1578 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 1579 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
|  | 1580 | static void moved_group_fair(struct task_struct *p) | 
|  | 1581 | { | 
|  | 1582 | struct cfs_rq *cfs_rq = task_cfs_rq(p); | 
|  | 1583 |  | 
|  | 1584 | update_curr(cfs_rq); | 
|  | 1585 | place_entity(cfs_rq, &p->se, 1); | 
|  | 1586 | } | 
|  | 1587 | #endif | 
|  | 1588 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1589 | /* | 
|  | 1590 | * All the scheduling class methods: | 
|  | 1591 | */ | 
| Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1592 | static const struct sched_class fair_sched_class = { | 
|  | 1593 | .next			= &idle_sched_class, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1594 | .enqueue_task		= enqueue_task_fair, | 
|  | 1595 | .dequeue_task		= dequeue_task_fair, | 
|  | 1596 | .yield_task		= yield_task_fair, | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1597 | #ifdef CONFIG_SMP | 
|  | 1598 | .select_task_rq		= select_task_rq_fair, | 
|  | 1599 | #endif /* CONFIG_SMP */ | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1600 |  | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1601 | .check_preempt_curr	= check_preempt_wakeup, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1602 |  | 
|  | 1603 | .pick_next_task		= pick_next_task_fair, | 
|  | 1604 | .put_prev_task		= put_prev_task_fair, | 
|  | 1605 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1606 | #ifdef CONFIG_SMP | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1607 | .load_balance		= load_balance_fair, | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1608 | .move_one_task		= move_one_task_fair, | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 1609 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1610 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1611 | .set_curr_task          = set_curr_task_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1612 | .task_tick		= task_tick_fair, | 
|  | 1613 | .task_new		= task_new_fair, | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1614 |  | 
|  | 1615 | .prio_changed		= prio_changed_fair, | 
|  | 1616 | .switched_to		= switched_to_fair, | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 1617 |  | 
|  | 1618 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
|  | 1619 | .moved_group		= moved_group_fair, | 
|  | 1620 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1621 | }; | 
|  | 1622 |  | 
|  | 1623 | #ifdef CONFIG_SCHED_DEBUG | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1624 | static void print_cfs_stats(struct seq_file *m, int cpu) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1625 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1626 | struct cfs_rq *cfs_rq; | 
|  | 1627 |  | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1628 | rcu_read_lock(); | 
| Ingo Molnar | c3b64f1 | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1629 | for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1630 | print_cfs_rq(m, cpu, cfs_rq); | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1631 | rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1632 | } | 
|  | 1633 | #endif |