/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

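/*
 * Illustrative arithmetic (added commentary, not from the source),
 * assuming the defaults above: with 8 online CPUs and LOG scaling the
 * factor is 1 + ilog2(8) = 4, so the effective sysctl_sched_latency
 * becomes 6ms * 4 = 24ms and sysctl_sched_min_granularity
 * 0.75ms * 4 = 3ms.
 */
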
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * sched_nr_latency is kept at
 * sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued.  The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * A preemption test can only be made between sibling entities
	 * that are in the same cfs_rq, i.e. that have a common parent.
	 * Walk up the hierarchy of both tasks until we find ancestors
	 * that are siblings under a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

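/*
 * Illustrative walk (added commentary, not from the source): if *se
 * sits three levels deep (task -> group B -> group A -> root) and
 * *pse sits one level deep, *se is first promoted twice to equalize
 * depths, then both are promoted in lockstep until they share a
 * cfs_rq and their vruntimes become directly comparable.
 */
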
#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

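/*
 * Why the (s64) casts above (added commentary, not from the source):
 * vruntime is an unsigned 64-bit quantity that may eventually wrap,
 * so the helpers compare the signed difference rather than the raw
 * values. Illustrative case: with a->vruntime = 2 and
 * b->vruntime = ULLONG_MAX - 1, b has wrapped "behind" a, and
 * (s64)(a->vruntime - b->vruntime) = 4 > 0 correctly orders b before
 * a, while a plain a < b comparison would get it backwards.
 */
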
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

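/*
 * Note on the 32-bit copy above (added commentary, not from the
 * source): on 32-bit architectures a u64 store is not atomic, so a
 * lockless remote reader could observe a torn min_vruntime. Storing
 * a second copy after an smp_wmb() lets such a reader compare the
 * two copies and retry until they agree, seqlock-style.
 */
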
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

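/*
 * Worked example (added commentary, not from the source): delta is
 * scaled by NICE_0_LOAD/weight, so for a nice-0 task (weight 1024,
 * i.e. NICE_0_LOAD) vruntime advances at wall-clock rate, while a
 * heavier nice -5 task (weight 3121 in the prio_to_weight table)
 * sees 10ms of runtime charged as only 10ms * 1024/3121 ~= 3.3ms of
 * vruntime, letting it stay leftmost in the tree for longer.
 */
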
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have
 * to stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}

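/*
 * Illustrative arithmetic with the (unscaled) defaults, added
 * commentary rather than source text: latency l = 6ms and
 * nr_latency nl = 8, so up to 8 runnable tasks share a 6ms period;
 * with 16 runnable tasks the period stretches to 0.75ms * 16 = 12ms,
 * so no slice ever shrinks below the minimum granularity.
 */
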
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}

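/*
 * Worked example (added commentary, not from the source): with two
 * runnable nice-0 tasks (weight 1024 each) on one cfs_rq, the period
 * is 6ms and each task's slice is 6ms * 1024/2048 = 3ms. If one of
 * them were nice -5 (weight 3121) instead, it would get
 * 6ms * 3121/4145 ~= 4.5ms and the nice-0 task ~= 1.5ms.
 */
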
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

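/*
 * Note (added commentary, not from the source): this converts the
 * wall-time slice into vruntime units, so for a nice-0 task vslice
 * equals slice, while for the nice -5 task in the example above the
 * ~4.5ms wall slice maps back to ~1.5ms of vruntime. Every task's
 * slice therefore spans the same amount of vruntime.
 */
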
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
static void update_cfs_shares(struct cfs_rq *cfs_rq);

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
	cfs_rq->load_unacc_exec_time += delta_exec;
#endif
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock_task;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_of(cfs_rq)->clock - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock_task;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
					    int global_update)
{
	struct task_group *tg = cfs_rq->tg;
	long load_avg;

	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
	load_avg -= cfs_rq->load_contribution;

	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
		atomic_add(load_avg, &tg->load_weight);
		cfs_rq->load_contribution += load_avg;
	}
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
	u64 period = sysctl_sched_shares_window;
	u64 now, delta;
	unsigned long load = cfs_rq->load.weight;

	if (cfs_rq->tg == &root_task_group)
		return;

	now = rq_of(cfs_rq)->clock_task;
	delta = now - cfs_rq->load_stamp;

	/* truncate load history at 4 idle periods */
	if (cfs_rq->load_stamp > cfs_rq->load_last &&
	    now - cfs_rq->load_last > 4 * period) {
		cfs_rq->load_period = 0;
		cfs_rq->load_avg = 0;
		delta = period - 1;
	}

	cfs_rq->load_stamp = now;
	cfs_rq->load_unacc_exec_time = 0;
	cfs_rq->load_period += delta;
	if (load) {
		cfs_rq->load_last = now;
		cfs_rq->load_avg += delta * load;
	}

	/* consider updating load contribution on each fold or truncate */
	if (global_update || cfs_rq->load_period > period
	    || !cfs_rq->load_period)
		update_cfs_rq_load_contribution(cfs_rq, global_update);

	while (cfs_rq->load_period > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (cfs_rq->load_period));
		cfs_rq->load_period /= 2;
		cfs_rq->load_avg /= 2;
	}

	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
		list_del_leaf_cfs_rq(cfs_rq);
}

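/*
 * Note on the fold loop above (added commentary, not from the
 * source): halving both load_period and load_avg once per elapsed
 * window makes load_avg a geometrically decaying sum. History from
 * one window ago counts at 1/2 weight, from two windows ago at 1/4,
 * and so on, which is what makes the 10ms
 * sysctl_sched_shares_window an "exponential sliding window".
 */
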
static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long load_weight, load, shares;

	load = cfs_rq->load.weight;

	load_weight = atomic_read(&tg->load_weight);
	load_weight += load;
	load_weight -= cfs_rq->load_contribution;

	shares = (tg->shares * load);
	if (load_weight)
		shares /= load_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}

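/*
 * Worked example (added commentary, not from the source): a group
 * with tg->shares = 1024 whose total load across all CPUs is 8192,
 * of which this CPU's cfs_rq carries 2048, gets
 * 1024 * 2048 / 8192 = 256 shares here. Each CPU's group entity thus
 * receives a portion of the group's weight proportional to the load
 * it carries. (The load_contribution adjustment substitutes this
 * cfs_rq's current load for its possibly stale global contribution.)
 */
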
static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
		update_cfs_load(cfs_rq, 0);
		update_cfs_shares(cfs_rq);
	}
}
# else /* CONFIG_SMP */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	return tg->shares;
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
# endif /* CONFIG_SMP */
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		account_entity_dequeue(cfs_rq, se);
	}

	update_load_set(&se->load, weight);

	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	struct task_group *tg;
	struct sched_entity *se;
	long shares;

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
	if (!se)
		return;
#ifndef CONFIG_SMP
	if (likely(se->load.weight == tg->shares))
		return;
#endif
	shares = calc_cfs_shares(cfs_rq, tg);

	reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->statistics.sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->statistics.sleep_max))
			se->statistics.sleep_max = delta;

		se->statistics.sleep_start = 0;
		se->statistics.sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->statistics.block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->statistics.block_max))
			se->statistics.block_max = delta;

		se->statistics.block_start = 0;
		se->statistics.sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->statistics.iowait_sum += delta;
				se->statistics.iowait_count++;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanoseconds, so shift
			 * by 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 919 | static void | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 920 | place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) | 
|  | 921 | { | 
| Peter Zijlstra | 1af5f73 | 2008-10-24 11:06:13 +0200 | [diff] [blame] | 922 | u64 vruntime = cfs_rq->min_vruntime; | 
| Peter Zijlstra | 94dfb5e | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 923 |  | 
| Peter Zijlstra | 2cb8600 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 924 | /* | 
|  | 925 | * The 'current' period is already promised to the current tasks, | 
|  | 926 | * however the extra weight of the new task will slow them down a | 
|  | 927 | * little, place the new task so that it fits in the slot that | 
|  | 928 | * stays open at the end. | 
|  | 929 | */ | 
| Peter Zijlstra | 94dfb5e | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 930 | if (initial && sched_feat(START_DEBIT)) | 
| Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 931 | vruntime += sched_vslice(cfs_rq, se); | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 932 |  | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 933 | /* sleeps up to a single latency don't count. */ | 
| Mike Galbraith | 5ca9880 | 2010-03-11 17:17:17 +0100 | [diff] [blame] | 934 | if (!initial) { | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 935 | unsigned long thresh = sysctl_sched_latency; | 
| Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 936 |  | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 937 | /* | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 938 | * Halve the sleep-time credit, to give | 
|  | 939 | * sleepers a gentler boost: | 
|  | 940 | */ | 
|  | 941 | if (sched_feat(GENTLE_FAIR_SLEEPERS)) | 
|  | 942 | thresh >>= 1; | 
| Ingo Molnar | 51e0304 | 2009-09-16 08:54:45 +0200 | [diff] [blame] | 943 |  | 
| Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 944 | vruntime -= thresh; | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 945 | } | 
|  | 946 |  | 
| Mike Galbraith | b5d9d73 | 2009-09-08 11:12:28 +0200 | [diff] [blame] | 947 | /* ensure we never gain time by being placed backwards. */ | 
|  | 948 | vruntime = max_vruntime(se->vruntime, vruntime); | 
|  | 949 |  | 
| Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 950 | se->vruntime = vruntime; | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 951 | } | 
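
To make the placement rules above concrete, here is a minimal userspace sketch of the two cases, with illustrative values for min_vruntime, the sched_vslice() result and sysctl_sched_latency (all invented; the real values come from the tunables and the current queue):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t min_vruntime = 1000000000ULL;	/* cfs_rq->min_vruntime, ns */
	uint64_t vslice       = 3000000ULL;	/* assumed sched_vslice() */
	uint64_t latency      = 6000000ULL;	/* sysctl_sched_latency */

	/* New task with START_DEBIT: placed one vslice past min_vruntime. */
	printf("fork placement:   %llu\n",
	       (unsigned long long)(min_vruntime + vslice));

	/* Waking sleeper, GENTLE_FAIR_SLEEPERS: credited half a latency. */
	printf("wakeup placement: %llu\n",
	       (unsigned long long)(min_vruntime - (latency >> 1)));
	return 0;
}
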
|  | 952 |  | 
|  | 953 | static void | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 954 | enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 955 | { | 
|  | 956 | /* | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 957 | * Update the normalized vruntime before updating min_vruntime | 
|  | 958 | * through calling update_curr(). | 
|  | 959 | */ | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 960 | if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING)) | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 961 | se->vruntime += cfs_rq->min_vruntime; | 
|  | 962 |  | 
|  | 963 | /* | 
| Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 964 | * Update run-time statistics of the 'current'. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 965 | */ | 
| Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 966 | update_curr(cfs_rq); | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 967 | update_cfs_load(cfs_rq, 0); | 
| Peter Zijlstra | a992241 | 2008-05-05 23:56:17 +0200 | [diff] [blame] | 968 | account_entity_enqueue(cfs_rq, se); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 969 | update_cfs_shares(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 970 |  | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 971 | if (flags & ENQUEUE_WAKEUP) { | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 972 | place_entity(cfs_rq, se, 0); | 
| Ingo Molnar | 2396af6 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 973 | enqueue_sleeper(cfs_rq, se); | 
| Ingo Molnar | e9acbff | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 974 | } | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 975 |  | 
| Ingo Molnar | d2417e5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 976 | update_stats_enqueue(cfs_rq, se); | 
| Peter Zijlstra | ddc9729 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 977 | check_spread(cfs_rq, se); | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 978 | if (se != cfs_rq->curr) | 
|  | 979 | __enqueue_entity(cfs_rq, se); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 980 | se->on_rq = 1; | 
| Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 981 |  | 
|  | 982 | if (cfs_rq->nr_running == 1) | 
|  | 983 | list_add_leaf_cfs_rq(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 984 | } | 
|  | 985 |  | 
| Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 986 | static void __clear_buddies_last(struct sched_entity *se) | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 987 | { | 
| Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 988 | for_each_sched_entity(se) { | 
|  | 989 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
|  | 990 | if (cfs_rq->last == se) | 
|  | 991 | cfs_rq->last = NULL; | 
|  | 992 | else | 
|  | 993 | break; | 
|  | 994 | } | 
|  | 995 | } | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 996 |  | 
| Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 997 | static void __clear_buddies_next(struct sched_entity *se) | 
|  | 998 | { | 
|  | 999 | for_each_sched_entity(se) { | 
|  | 1000 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
|  | 1001 | if (cfs_rq->next == se) | 
|  | 1002 | cfs_rq->next = NULL; | 
|  | 1003 | else | 
|  | 1004 | break; | 
|  | 1005 | } | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1006 | } | 
|  | 1007 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1008 | static void __clear_buddies_skip(struct sched_entity *se) | 
|  | 1009 | { | 
|  | 1010 | for_each_sched_entity(se) { | 
|  | 1011 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
|  | 1012 | if (cfs_rq->skip == se) | 
|  | 1013 | cfs_rq->skip = NULL; | 
|  | 1014 | else | 
|  | 1015 | break; | 
|  | 1016 | } | 
|  | 1017 | } | 
|  | 1018 |  | 
| Peter Zijlstra | a571bbe | 2009-01-28 14:51:40 +0100 | [diff] [blame] | 1019 | static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
|  | 1020 | { | 
| Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1021 | if (cfs_rq->last == se) | 
|  | 1022 | __clear_buddies_last(se); | 
|  | 1023 |  | 
|  | 1024 | if (cfs_rq->next == se) | 
|  | 1025 | __clear_buddies_next(se); | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1026 |  | 
|  | 1027 | if (cfs_rq->skip == se) | 
|  | 1028 | __clear_buddies_skip(se); | 
| Peter Zijlstra | a571bbe | 2009-01-28 14:51:40 +0100 | [diff] [blame] | 1029 | } | 
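
The three walkers above share one shape: climb the group hierarchy and clear the buddy pointer at each level that still references this entity, stopping at the first level that does not (levels above it cannot reference it either). A compilable sketch of that walk, using a hypothetical parent-linked entity in place of for_each_sched_entity():

#include <stdio.h>

struct entity {
	struct entity *parent;		/* NULL at the root */
	struct entity **buddy_slot;	/* this level's cfs_rq->last */
};

static void clear_last_buddy(struct entity *se)
{
	for (; se; se = se->parent) {
		if (*se->buddy_slot == se)
			*se->buddy_slot = NULL;	/* was 'last' at this level */
		else
			break;			/* higher levels untouched */
	}
}

int main(void)
{
	struct entity *last0, *last1;
	struct entity group = { NULL, &last1 };
	struct entity task  = { &group, &last0 };

	last0 = &task;
	last1 = &group;
	clear_last_buddy(&task);
	printf("cleared: %d %d\n", last0 == NULL, last1 == NULL);	/* 1 1 */
	return 0;
}
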
|  | 1030 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1031 | static void | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1032 | dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1033 | { | 
| Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1034 | /* | 
|  | 1035 | * Update run-time statistics of the 'current'. | 
|  | 1036 | */ | 
|  | 1037 | update_curr(cfs_rq); | 
|  | 1038 |  | 
| Ingo Molnar | 19b6a2e | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1039 | update_stats_dequeue(cfs_rq, se); | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1040 | if (flags & DEQUEUE_SLEEP) { | 
| Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1041 | #ifdef CONFIG_SCHEDSTATS | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1042 | if (entity_is_task(se)) { | 
|  | 1043 | struct task_struct *tsk = task_of(se); | 
|  | 1044 |  | 
|  | 1045 | if (tsk->state & TASK_INTERRUPTIBLE) | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1046 | se->statistics.sleep_start = rq_of(cfs_rq)->clock; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1047 | if (tsk->state & TASK_UNINTERRUPTIBLE) | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1048 | se->statistics.block_start = rq_of(cfs_rq)->clock; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1049 | } | 
| Dmitry Adamushko | db36cc7 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 1050 | #endif | 
| Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1051 | } | 
|  | 1052 |  | 
| Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1053 | clear_buddies(cfs_rq, se); | 
| Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 1054 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1055 | if (se != cfs_rq->curr) | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1056 | __dequeue_entity(cfs_rq, se); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1057 | se->on_rq = 0; | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 1058 | update_cfs_load(cfs_rq, 0); | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1059 | account_entity_dequeue(cfs_rq, se); | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1060 |  | 
|  | 1061 | /* | 
|  | 1062 | * Normalize the entity after updating the min_vruntime because the | 
|  | 1063 | * update can refer to the ->curr item and we need to reflect this | 
|  | 1064 | * movement in our normalized position. | 
|  | 1065 | */ | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1066 | if (!(flags & DEQUEUE_SLEEP)) | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1067 | se->vruntime -= cfs_rq->min_vruntime; | 
| Peter Zijlstra | 1e87623 | 2011-05-17 16:21:10 -0700 | [diff] [blame] | 1068 |  | 
|  | 1069 | update_min_vruntime(cfs_rq); | 
|  | 1070 | update_cfs_shares(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1071 | } | 
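
The normalization at the end of dequeue_entity() pairs with the one at the top of enqueue_entity(): a task leaving a runqueue for migration keeps only its offset from that queue's min_vruntime, and the offset is rebased onto the destination queue at enqueue time. A toy model of that round trip (the min_vruntime values are invented):

#include <stdio.h>

int main(void)
{
	long long min_vr_src = 500, min_vr_dst = 900;	/* invented clocks */
	long long vruntime = 1000;	/* task's vruntime on the source rq */

	/* dequeue without DEQUEUE_SLEEP (migration): make it relative */
	vruntime -= min_vr_src;		/* 500: rq-independent offset */

	/* enqueue without ENQUEUE_WAKEUP: rebase onto the destination */
	vruntime += min_vr_dst;		/* 1400: in the dst rq's terms */

	printf("migrated vruntime: %lld\n", vruntime);
	return 0;
}
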
|  | 1072 |  | 
|  | 1073 | /* | 
|  | 1074 | * Preempt the current task with a newly woken task if needed: | 
|  | 1075 | */ | 
| Peter Zijlstra | 7c92e54 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1076 | static void | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1077 | check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1078 | { | 
| Peter Zijlstra | 1169783 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1079 | unsigned long ideal_runtime, delta_exec; | 
|  | 1080 |  | 
| Peter Zijlstra | 6d0f0eb | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1081 | ideal_runtime = sched_slice(cfs_rq, curr); | 
| Peter Zijlstra | 1169783 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1082 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | 
| Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1083 | if (delta_exec > ideal_runtime) { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1084 | resched_task(rq_of(cfs_rq)->curr); | 
| Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1085 | /* | 
|  | 1086 | * The current task ran long enough; ensure it doesn't get | 
|  | 1087 | * re-elected due to buddy favours. | 
|  | 1088 | */ | 
|  | 1089 | clear_buddies(cfs_rq, curr); | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1090 | return; | 
|  | 1091 | } | 
|  | 1092 |  | 
|  | 1093 | /* | 
|  | 1094 | * Ensure that a task that missed wakeup preemption by a | 
|  | 1095 | * narrow margin doesn't have to wait for a full slice. | 
|  | 1096 | * This also mitigates buddy induced latencies under load. | 
|  | 1097 | */ | 
|  | 1098 | if (!sched_feat(WAKEUP_PREEMPT)) | 
|  | 1099 | return; | 
|  | 1100 |  | 
|  | 1101 | if (delta_exec < sysctl_sched_min_granularity) | 
|  | 1102 | return; | 
|  | 1103 |  | 
|  | 1104 | if (cfs_rq->nr_running > 1) { | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1105 | struct sched_entity *se = __pick_first_entity(cfs_rq); | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1106 | s64 delta = curr->vruntime - se->vruntime; | 
|  | 1107 |  | 
| Mike Galbraith | d7d82944 | 2011-01-05 05:41:17 +0100 | [diff] [blame] | 1108 | if (delta < 0) | 
|  | 1109 | return; | 
|  | 1110 |  | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1111 | if (delta > ideal_runtime) | 
|  | 1112 | resched_task(rq_of(cfs_rq)->curr); | 
| Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1113 | } | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1114 | } | 
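
A worked example of the first test above, with made-up numbers: a 4ms slice and 5ms of runtime since the task was last picked, so the tick reschedules it:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ideal_runtime = 4000000;	/* sched_slice(), ns */
	uint64_t sum_exec      = 25000000;	/* curr->sum_exec_runtime */
	uint64_t prev_sum_exec = 20000000;	/* ->prev_sum_exec_runtime */
	uint64_t delta_exec    = sum_exec - prev_sum_exec;

	if (delta_exec > ideal_runtime)
		printf("resched: ran %llu ns of a %llu ns slice\n",
		       (unsigned long long)delta_exec,
		       (unsigned long long)ideal_runtime);
	return 0;
}
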
|  | 1115 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1116 | static void | 
| Ingo Molnar | 8494f41 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1117 | set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1118 | { | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1119 | /* 'current' is not kept within the tree. */ | 
|  | 1120 | if (se->on_rq) { | 
|  | 1121 | /* | 
|  | 1122 | * Any task has to be enqueued before it gets to execute on | 
|  | 1123 | * a CPU. So account for the time it spent waiting on the | 
|  | 1124 | * runqueue. | 
|  | 1125 | */ | 
|  | 1126 | update_stats_wait_end(cfs_rq, se); | 
|  | 1127 | __dequeue_entity(cfs_rq, se); | 
|  | 1128 | } | 
|  | 1129 |  | 
| Ingo Molnar | 79303e9 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1130 | update_stats_curr_start(cfs_rq, se); | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1131 | cfs_rq->curr = se; | 
| Ingo Molnar | eba1ed4 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 1132 | #ifdef CONFIG_SCHEDSTATS | 
|  | 1133 | /* | 
|  | 1134 | * Track our maximum slice length, if the CPU's load is at | 
|  | 1135 | * least twice that of our own weight (i.e. don't track it | 
|  | 1136 | * when there are only lesser-weight tasks around): | 
|  | 1137 | */ | 
| Dmitry Adamushko | 495eca4 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 1138 | if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1139 | se->statistics.slice_max = max(se->statistics.slice_max, | 
| Ingo Molnar | eba1ed4 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 1140 | se->sum_exec_runtime - se->prev_sum_exec_runtime); | 
|  | 1141 | } | 
|  | 1142 | #endif | 
| Peter Zijlstra | 4a55b45 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1143 | se->prev_sum_exec_runtime = se->sum_exec_runtime; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1144 | } | 
|  | 1145 |  | 
| Peter Zijlstra | 3f3a490 | 2008-10-24 11:06:16 +0200 | [diff] [blame] | 1146 | static int | 
|  | 1147 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); | 
|  | 1148 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1149 | /* | 
|  | 1150 | * Pick the next process, keeping these things in mind, in this order: | 
|  | 1151 | * 1) keep things fair between processes/task groups | 
|  | 1152 | * 2) pick the "next" process, since someone really wants that to run | 
|  | 1153 | * 3) pick the "last" process, for cache locality | 
|  | 1154 | * 4) do not run the "skip" process, if something else is available | 
|  | 1155 | */ | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1156 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1157 | { | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1158 | struct sched_entity *se = __pick_first_entity(cfs_rq); | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1159 | struct sched_entity *left = se; | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1160 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1161 | /* | 
|  | 1162 | * Avoid running the skip buddy if running something else can | 
|  | 1163 | * be done without getting too unfair. | 
|  | 1164 | */ | 
|  | 1165 | if (cfs_rq->skip == se) { | 
|  | 1166 | struct sched_entity *second = __pick_next_entity(se); | 
|  | 1167 | if (second && wakeup_preempt_entity(second, left) < 1) | 
|  | 1168 | se = second; | 
|  | 1169 | } | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1170 |  | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1171 | /* | 
|  | 1172 | * Prefer last buddy, try to return the CPU to a preempted task. | 
|  | 1173 | */ | 
|  | 1174 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) | 
|  | 1175 | se = cfs_rq->last; | 
|  | 1176 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1177 | /* | 
|  | 1178 | * Someone really wants this to run. If it's not unfair, run it. | 
|  | 1179 | */ | 
|  | 1180 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) | 
|  | 1181 | se = cfs_rq->next; | 
|  | 1182 |  | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1183 | clear_buddies(cfs_rq, se); | 
| Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 1184 |  | 
|  | 1185 | return se; | 
| Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1186 | } | 
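
The four rules in the comment above boil down to a chain of overrides applied to the leftmost entity. A self-contained sketch of that chain, where fair_enough() stands in (hypothetically) for 'wakeup_preempt_entity(cand, left) < 1', i.e. the candidate is not more than one granularity ahead of the leftmost:

#include <stdio.h>

struct sched_entity { long long vruntime; };

static int fair_enough(struct sched_entity *cand, struct sched_entity *left)
{
	return cand->vruntime - left->vruntime <= 1000000;	/* 1ms gran */
}

static struct sched_entity *
pick(struct sched_entity *leftmost, struct sched_entity *skip,
     struct sched_entity *second, struct sched_entity *next,
     struct sched_entity *last)
{
	struct sched_entity *se = leftmost;

	if (skip == se && second && fair_enough(second, leftmost))
		se = second;	/* 4) avoid the skip buddy */
	if (last && fair_enough(last, leftmost))
		se = last;	/* 3) cache-hot preempted task */
	if (next && fair_enough(next, leftmost))
		se = next;	/* 2) someone really wants 'next' */
	return se;		/* 1) fairness wins by default */
}

int main(void)
{
	struct sched_entity a = { 0 }, b = { 500000 }, c = { 5000000 };

	/* 'next' buddy c is too far ahead; 'last' buddy b is close enough */
	printf("picked b: %d\n", pick(&a, NULL, NULL, &c, &b) == &b);
	return 0;
}
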
|  | 1187 |  | 
| Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1188 | static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1189 | { | 
|  | 1190 | /* | 
|  | 1191 | * If still on the runqueue then deactivate_task() | 
|  | 1192 | * was not called and update_curr() has to be done: | 
|  | 1193 | */ | 
|  | 1194 | if (prev->on_rq) | 
| Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1195 | update_curr(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1196 |  | 
| Peter Zijlstra | ddc9729 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1197 | check_spread(cfs_rq, prev); | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1198 | if (prev->on_rq) { | 
| Ingo Molnar | 5870db5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1199 | update_stats_wait_start(cfs_rq, prev); | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1200 | /* Put 'current' back into the tree. */ | 
|  | 1201 | __enqueue_entity(cfs_rq, prev); | 
|  | 1202 | } | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1203 | cfs_rq->curr = NULL; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1204 | } | 
|  | 1205 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1206 | static void | 
|  | 1207 | entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1208 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1209 | /* | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1210 | * Update run-time statistics of the 'current'. | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1211 | */ | 
| Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1212 | update_curr(cfs_rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1213 |  | 
| Paul Turner | 43365bd | 2010-12-15 19:10:17 -0800 | [diff] [blame] | 1214 | /* | 
|  | 1215 | * Update share accounting for long-running entities. | 
|  | 1216 | */ | 
|  | 1217 | update_entity_shares_tick(cfs_rq); | 
|  | 1218 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1219 | #ifdef CONFIG_SCHED_HRTICK | 
|  | 1220 | /* | 
|  | 1221 | * Queued ticks are scheduled to match the slice, so don't bother | 
|  | 1222 | * validating them; just reschedule. | 
|  | 1223 | */ | 
| Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 1224 | if (queued) { | 
|  | 1225 | resched_task(rq_of(cfs_rq)->curr); | 
|  | 1226 | return; | 
|  | 1227 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1228 | /* | 
|  | 1229 | * don't let the period tick interfere with the hrtick preemption | 
|  | 1230 | */ | 
|  | 1231 | if (!sched_feat(DOUBLE_TICK) && | 
|  | 1232 | hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) | 
|  | 1233 | return; | 
|  | 1234 | #endif | 
|  | 1235 |  | 
| Peter Zijlstra | ce6c131 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1236 | if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT)) | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1237 | check_preempt_tick(cfs_rq, curr); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1238 | } | 
|  | 1239 |  | 
|  | 1240 | /************************************************** | 
|  | 1241 | * CFS operations on tasks: | 
|  | 1242 | */ | 
|  | 1243 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1244 | #ifdef CONFIG_SCHED_HRTICK | 
|  | 1245 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | 
|  | 1246 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1247 | struct sched_entity *se = &p->se; | 
|  | 1248 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
|  | 1249 |  | 
|  | 1250 | WARN_ON(task_rq(p) != rq); | 
|  | 1251 |  | 
|  | 1252 | if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) { | 
|  | 1253 | u64 slice = sched_slice(cfs_rq, se); | 
|  | 1254 | u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; | 
|  | 1255 | s64 delta = slice - ran; | 
|  | 1256 |  | 
|  | 1257 | if (delta < 0) { | 
|  | 1258 | if (rq->curr == p) | 
|  | 1259 | resched_task(p); | 
|  | 1260 | return; | 
|  | 1261 | } | 
|  | 1262 |  | 
|  | 1263 | /* | 
|  | 1264 | * Don't schedule slices shorter than 10000ns; that just | 
|  | 1265 | * doesn't make sense. Rely on vruntime for fairness. | 
|  | 1266 | */ | 
| Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1267 | if (rq->curr != p) | 
| Peter Zijlstra | 157124c | 2008-07-28 11:53:11 +0200 | [diff] [blame] | 1268 | delta = max_t(s64, 10000LL, delta); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1269 |  | 
| Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1270 | hrtick_start(rq, delta); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1271 | } | 
|  | 1272 | } | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 1273 |  | 
|  | 1274 | /* | 
|  | 1275 | * Called from enqueue/dequeue; updates the hrtick when the | 
|  | 1276 | * current task is from our class and nr_running is low enough | 
|  | 1277 | * to matter. | 
|  | 1278 | */ | 
|  | 1279 | static void hrtick_update(struct rq *rq) | 
|  | 1280 | { | 
|  | 1281 | struct task_struct *curr = rq->curr; | 
|  | 1282 |  | 
|  | 1283 | if (curr->sched_class != &fair_sched_class) | 
|  | 1284 | return; | 
|  | 1285 |  | 
|  | 1286 | if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) | 
|  | 1287 | hrtick_start_fair(rq, curr); | 
|  | 1288 | } | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 1289 | #else /* !CONFIG_SCHED_HRTICK */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1290 | static inline void | 
|  | 1291 | hrtick_start_fair(struct rq *rq, struct task_struct *p) | 
|  | 1292 | { | 
|  | 1293 | } | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 1294 |  | 
|  | 1295 | static inline void hrtick_update(struct rq *rq) | 
|  | 1296 | { | 
|  | 1297 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1298 | #endif | 
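
A sketch of the delta computation in hrtick_start_fair() with invented numbers: the timer is armed for the remainder of the slice, and (for a task that is not currently running) the delta is clamped to at least 10us, as the comment above explains:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t slice = 6000000, ran = 5995000;	/* ns, invented */
	int64_t delta = slice - ran;		/* 5000 ns left */

	if (delta < 0) {
		printf("slice exhausted: resched now\n");
		return 0;
	}
	if (delta < 10000)	/* shorter than 10us: not worth a timer */
		delta = 10000;
	printf("arm hrtick in %lld ns\n", (long long)delta);
	return 0;
}
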
|  | 1299 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1300 | /* | 
|  | 1301 | * The enqueue_task method is called before nr_running is | 
|  | 1302 | * increased. Here we update the fair scheduling stats and | 
|  | 1303 | * then put the task into the rbtree: | 
|  | 1304 | */ | 
| Thomas Gleixner | ea87bb7 | 2010-01-20 20:58:57 +0000 | [diff] [blame] | 1305 | static void | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1306 | enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1307 | { | 
|  | 1308 | struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1309 | struct sched_entity *se = &p->se; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1310 |  | 
|  | 1311 | for_each_sched_entity(se) { | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1312 | if (se->on_rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1313 | break; | 
|  | 1314 | cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1315 | enqueue_entity(cfs_rq, se, flags); | 
|  | 1316 | flags = ENQUEUE_WAKEUP; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1317 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1318 |  | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1319 | for_each_sched_entity(se) { | 
| Lin Ming | 0f31714 | 2011-07-22 09:14:31 +0800 | [diff] [blame] | 1320 | cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1321 |  | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 1322 | update_cfs_load(cfs_rq, 0); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 1323 | update_cfs_shares(cfs_rq); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1324 | } | 
|  | 1325 |  | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 1326 | hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1327 | } | 
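
The first loop above climbs the group hierarchy and stops at the first ancestor entity that is already queued (everything above it must be queued too); the second loop then refreshes load and shares the rest of the way up. A toy model of the early stop, with plain flags standing in for sched entities:

#include <stdio.h>

int main(void)
{
	/* on_rq flag per hierarchy level: task, group, parent, root */
	int on_rq[4] = { 0, 0, 1, 1 };
	int level;

	for (level = 0; level < 4; level++) {
		if (on_rq[level])
			break;		/* ancestors are already queued */
		on_rq[level] = 1;	/* enqueue_entity() at this level */
	}
	printf("enqueued %d level(s)\n", level);	/* 2 */
	return 0;
}
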
|  | 1328 |  | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1329 | static void set_next_buddy(struct sched_entity *se); | 
|  | 1330 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1331 | /* | 
|  | 1332 | * The dequeue_task method is called before nr_running is | 
|  | 1333 | * decreased. We remove the task from the rbtree and | 
|  | 1334 | * update the fair scheduling stats: | 
|  | 1335 | */ | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1336 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1337 | { | 
|  | 1338 | struct cfs_rq *cfs_rq; | 
| Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 1339 | struct sched_entity *se = &p->se; | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1340 | int task_sleep = flags & DEQUEUE_SLEEP; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1341 |  | 
|  | 1342 | for_each_sched_entity(se) { | 
|  | 1343 | cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1344 | dequeue_entity(cfs_rq, se, flags); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1345 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1346 | /* Don't dequeue parent if it has other entities besides us */ | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1347 | if (cfs_rq->load.weight) { | 
|  | 1348 | /* | 
|  | 1349 | * Bias pick_next to pick a task from this cfs_rq, since | 
|  | 1350 | * p is going to sleep while still within its sched_slice. | 
|  | 1351 | */ | 
|  | 1352 | if (task_sleep && parent_entity(se)) | 
|  | 1353 | set_next_buddy(parent_entity(se)); | 
| Paul Turner | 9598c82 | 2011-07-06 22:30:37 -0700 | [diff] [blame] | 1354 |  | 
|  | 1355 | /* avoid re-evaluating load for this entity */ | 
|  | 1356 | se = parent_entity(se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1357 | break; | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1358 | } | 
| Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1359 | flags |= DEQUEUE_SLEEP; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1360 | } | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1361 |  | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1362 | for_each_sched_entity(se) { | 
| Lin Ming | 0f31714 | 2011-07-22 09:14:31 +0800 | [diff] [blame] | 1363 | cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1364 |  | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 1365 | update_cfs_load(cfs_rq, 0); | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 1366 | update_cfs_shares(cfs_rq); | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1367 | } | 
|  | 1368 |  | 
| Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 1369 | hrtick_update(rq); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1370 | } | 
|  | 1371 |  | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1372 | #ifdef CONFIG_SMP | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1373 |  | 
| Peter Zijlstra | 74f8e4b | 2011-04-05 17:23:47 +0200 | [diff] [blame] | 1374 | static void task_waking_fair(struct task_struct *p) | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1375 | { | 
|  | 1376 | struct sched_entity *se = &p->se; | 
|  | 1377 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 1378 | u64 min_vruntime; | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1379 |  | 
| Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 1380 | #ifndef CONFIG_64BIT | 
|  | 1381 | u64 min_vruntime_copy; | 
| Peter Zijlstra | 74f8e4b | 2011-04-05 17:23:47 +0200 | [diff] [blame] | 1382 |  | 
| Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 1383 | do { | 
|  | 1384 | min_vruntime_copy = cfs_rq->min_vruntime_copy; | 
|  | 1385 | smp_rmb(); | 
|  | 1386 | min_vruntime = cfs_rq->min_vruntime; | 
|  | 1387 | } while (min_vruntime != min_vruntime_copy); | 
|  | 1388 | #else | 
|  | 1389 | min_vruntime = cfs_rq->min_vruntime; | 
|  | 1390 | #endif | 
|  | 1391 |  | 
|  | 1392 | se->vruntime -= min_vruntime; | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1393 | } | 
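
On 32-bit, the retry loop above is a lock-free consistent read of a 64-bit value: the updater writes min_vruntime and then min_vruntime_copy with a write barrier in between, so the reader retries until both halves agree. A userspace sketch of the reader side (the compiler barrier below merely stands in for the real smp_rmb()):

#include <stdint.h>

volatile uint64_t min_vruntime, min_vruntime_copy;

static uint64_t read_min_vruntime(void)
{
	uint64_t copy, val;

	do {
		copy = min_vruntime_copy;
		__asm__ __volatile__("" ::: "memory");	/* smp_rmb() stand-in */
		val = min_vruntime;
	} while (val != copy);		/* torn update seen: retry */

	return val;
}

int main(void)
{
	min_vruntime = min_vruntime_copy = 42;
	return (int)read_min_vruntime() - 42;	/* exits 0 */
}
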
|  | 1394 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1395 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 1396 | /* | 
|  | 1397 | * effective_load() calculates the load change as seen from the root_task_group | 
|  | 1398 | * | 
|  | 1399 | * Adding load to a group doesn't make a group heavier, but can cause movement | 
|  | 1400 | * of group shares between cpus. Assuming the shares were perfectly aligned, one | 
|  | 1401 | * can calculate the shift in shares. | 
| Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 1402 | */ | 
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1403 | static long effective_load(struct task_group *tg, int cpu, long wl, long wg) | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1404 | { | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1405 | struct sched_entity *se = tg->se[cpu]; | 
| Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 1406 |  | 
|  | 1407 | if (!tg->parent) | 
|  | 1408 | return wl; | 
|  | 1409 |  | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1410 | for_each_sched_entity(se) { | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 1411 | long lw, w; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1412 |  | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 1413 | tg = se->my_q->tg; | 
|  | 1414 | w = se->my_q->load.weight; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1415 |  | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 1416 | /* use this cpu's instantaneous contribution */ | 
|  | 1417 | lw = atomic_read(&tg->load_weight); | 
|  | 1418 | lw -= se->my_q->load_contribution; | 
|  | 1419 | lw += w + wg; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1420 |  | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 1421 | wl += w; | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 1422 |  | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 1423 | if (lw > 0 && wl < lw) | 
|  | 1424 | wl = (wl * tg->shares) / lw; | 
|  | 1425 | else | 
|  | 1426 | wl = tg->shares; | 
| Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 1427 |  | 
| Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 1428 | /* zero point is MIN_SHARES */ | 
|  | 1429 | if (wl < MIN_SHARES) | 
|  | 1430 | wl = MIN_SHARES; | 
|  | 1431 | wl -= se->load.weight; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1432 | wg = 0; | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1433 | } | 
|  | 1434 |  | 
|  | 1435 | return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1436 | } | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1437 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1438 | #else | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1439 |  | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1440 | static inline unsigned long effective_load(struct task_group *tg, int cpu, | 
|  | 1441 | unsigned long wl, unsigned long wg) | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1442 | { | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1443 | return wl; | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1444 | } | 
| Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 1445 |  | 
| Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 1446 | #endif | 
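
A worked example of one level of the loop above, with invented weights: a cpu carrying 1024 of a group's 4096 total weight owns 512 of its 2048 shares; after a weight-1024 task is added, the cpu's slice grows to about 819, so the effective load change at this level is roughly +307 rather than the raw +1024:

#include <stdio.h>

int main(void)
{
	long shares = 2048;		/* tg->shares, invented */
	long w = 1024, wg = 1024;	/* this cpu's weight, incoming weight */
	long group_w = 4096;		/* group total before the wakeup */

	long before = w * shares / group_w;
	long after  = (w + wg) * shares / (group_w + wg);

	printf("share shift at this level: %ld -> %ld (%+ld)\n",
	       before, after, after - before);
	return 0;
}
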
|  | 1447 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1448 | static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1449 | { | 
| Paul Turner | e37b6a7 | 2011-01-21 20:44:59 -0800 | [diff] [blame] | 1450 | s64 this_load, load; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1451 | int idx, this_cpu, prev_cpu; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1452 | unsigned long tl_per_task; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1453 | struct task_group *tg; | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1454 | unsigned long weight; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1455 | int balanced; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1456 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1457 | idx	  = sd->wake_idx; | 
|  | 1458 | this_cpu  = smp_processor_id(); | 
|  | 1459 | prev_cpu  = task_cpu(p); | 
|  | 1460 | load	  = source_load(prev_cpu, idx); | 
|  | 1461 | this_load = target_load(this_cpu, idx); | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1462 |  | 
|  | 1463 | /* | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1464 | * If sync wakeup then subtract the (maximum possible) | 
|  | 1465 | * effect of the currently running task from the load | 
|  | 1466 | * of the current CPU: | 
|  | 1467 | */ | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1468 | if (sync) { | 
|  | 1469 | tg = task_group(current); | 
|  | 1470 | weight = current->se.load.weight; | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1471 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1472 | this_load += effective_load(tg, this_cpu, -weight, -weight); | 
| Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 1473 | load += effective_load(tg, prev_cpu, 0, -weight); | 
|  | 1474 | } | 
|  | 1475 |  | 
|  | 1476 | tg = task_group(p); | 
|  | 1477 | weight = p->se.load.weight; | 
|  | 1478 |  | 
| Peter Zijlstra | 71a29aa | 2009-09-07 18:28:05 +0200 | [diff] [blame] | 1479 | /* | 
|  | 1480 | * In low-load situations, where prev_cpu is idle and this_cpu is idle | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1481 | * due to the sync case above having dropped this_load to 0, we'll | 
|  | 1482 | * always have an imbalance, but there's really nothing you can do | 
|  | 1483 | * about that, so that's good too. | 
| Peter Zijlstra | 71a29aa | 2009-09-07 18:28:05 +0200 | [diff] [blame] | 1484 | * | 
|  | 1485 | * Otherwise check if either cpus are near enough in load to allow this | 
|  | 1486 | * task to be woken on this_cpu. | 
|  | 1487 | */ | 
| Paul Turner | e37b6a7 | 2011-01-21 20:44:59 -0800 | [diff] [blame] | 1488 | if (this_load > 0) { | 
|  | 1489 | s64 this_eff_load, prev_eff_load; | 
| Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 1490 |  | 
|  | 1491 | this_eff_load = 100; | 
|  | 1492 | this_eff_load *= power_of(prev_cpu); | 
|  | 1493 | this_eff_load *= this_load + | 
|  | 1494 | effective_load(tg, this_cpu, weight, weight); | 
|  | 1495 |  | 
|  | 1496 | prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; | 
|  | 1497 | prev_eff_load *= power_of(this_cpu); | 
|  | 1498 | prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); | 
|  | 1499 |  | 
|  | 1500 | balanced = this_eff_load <= prev_eff_load; | 
|  | 1501 | } else | 
|  | 1502 | balanced = true; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1503 |  | 
|  | 1504 | /* | 
|  | 1505 | * If the currently running task will sleep within | 
|  | 1506 | * a reasonable amount of time then attract this newly | 
|  | 1507 | * woken task: | 
|  | 1508 | */ | 
| Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 1509 | if (sync && balanced) | 
|  | 1510 | return 1; | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1511 |  | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1512 | schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts); | 
| Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 1513 | tl_per_task = cpu_avg_load_per_task(this_cpu); | 
|  | 1514 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1515 | if (balanced || | 
|  | 1516 | (this_load <= load && | 
|  | 1517 | this_load + target_load(prev_cpu, idx) <= tl_per_task)) { | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1518 | /* | 
|  | 1519 | * This domain has SD_WAKE_AFFINE and | 
|  | 1520 | * p is cache cold in this domain, and | 
|  | 1521 | * there is no bad imbalance. | 
|  | 1522 | */ | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1523 | schedstat_inc(sd, ttwu_move_affine); | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1524 | schedstat_inc(p, se.statistics.nr_wakeups_affine); | 
| Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 1525 |  | 
|  | 1526 | return 1; | 
|  | 1527 | } | 
|  | 1528 | return 0; | 
|  | 1529 | } | 
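
A worked example of the 'balanced' test above with invented loads and equal cpu power: prev_cpu gets the imbalance_pct headroom, so the affine wakeup is allowed only while this_cpu's effective load stays comfortably low. Note the cross-multiplication by the other cpu's power, mirroring the code:

#include <stdio.h>

int main(void)
{
	long long imbalance_pct = 125;		/* sd->imbalance_pct */
	long long power_this = 1024, power_prev = 1024;
	long long this_load = 1000, prev_load = 1200; /* post effective_load */

	long long this_eff = 100 * power_prev * this_load;
	long long prev_eff = (100 + (imbalance_pct - 100) / 2)
			     * power_this * prev_load;

	printf("affine wakeup allowed: %s\n",
	       this_eff <= prev_eff ? "yes" : "no");
	return 0;
}
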
|  | 1530 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1531 | /* | 
|  | 1532 | * find_idlest_group finds and returns the least busy CPU group within the | 
|  | 1533 | * domain. | 
|  | 1534 | */ | 
|  | 1535 | static struct sched_group * | 
| Peter Zijlstra | 78e7ed5 | 2009-09-03 13:16:51 +0200 | [diff] [blame] | 1536 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1537 | int this_cpu, int load_idx) | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1538 | { | 
| Andi Kleen | b3bd3de | 2010-08-10 14:17:51 -0700 | [diff] [blame] | 1539 | struct sched_group *idlest = NULL, *group = sd->groups; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1540 | unsigned long min_load = ULONG_MAX, this_load = 0; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1541 | int imbalance = 100 + (sd->imbalance_pct-100)/2; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1542 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1543 | do { | 
|  | 1544 | unsigned long load, avg_load; | 
|  | 1545 | int local_group; | 
|  | 1546 | int i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1547 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1548 | /* Skip over this group if it has no CPUs allowed */ | 
|  | 1549 | if (!cpumask_intersects(sched_group_cpus(group), | 
|  | 1550 | &p->cpus_allowed)) | 
|  | 1551 | continue; | 
|  | 1552 |  | 
|  | 1553 | local_group = cpumask_test_cpu(this_cpu, | 
|  | 1554 | sched_group_cpus(group)); | 
|  | 1555 |  | 
|  | 1556 | /* Tally up the load of all CPUs in the group */ | 
|  | 1557 | avg_load = 0; | 
|  | 1558 |  | 
|  | 1559 | for_each_cpu(i, sched_group_cpus(group)) { | 
|  | 1560 | /* Bias balancing toward cpus of our domain */ | 
|  | 1561 | if (local_group) | 
|  | 1562 | load = source_load(i, load_idx); | 
|  | 1563 | else | 
|  | 1564 | load = target_load(i, load_idx); | 
|  | 1565 |  | 
|  | 1566 | avg_load += load; | 
|  | 1567 | } | 
|  | 1568 |  | 
|  | 1569 | /* Adjust by relative CPU power of the group */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 1570 | avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1571 |  | 
|  | 1572 | if (local_group) { | 
|  | 1573 | this_load = avg_load; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1574 | } else if (avg_load < min_load) { | 
|  | 1575 | min_load = avg_load; | 
|  | 1576 | idlest = group; | 
|  | 1577 | } | 
|  | 1578 | } while (group = group->next, group != sd->groups); | 
|  | 1579 |  | 
|  | 1580 | if (!idlest || 100*this_load < imbalance*min_load) | 
|  | 1581 | return NULL; | 
|  | 1582 | return idlest; | 
|  | 1583 | } | 
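
The power adjustment above makes group loads comparable across differently sized or differently clocked groups. A worked example, assuming SCHED_POWER_SCALE is 1024: a raw load of 3000 on a group whose combined power is 2048 (two full-power cpus) normalizes to 1500:

#include <stdio.h>

int main(void)
{
	unsigned long scale = 1024;		/* assumed SCHED_POWER_SCALE */
	unsigned long raw_load = 3000;
	unsigned long group_power = 2048;	/* two full-power cpus */

	printf("normalized load: %lu\n", raw_load * scale / group_power);
	return 0;
}
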
|  | 1584 |  | 
|  | 1585 | /* | 
|  | 1586 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | 
|  | 1587 | */ | 
|  | 1588 | static int | 
|  | 1589 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | 
|  | 1590 | { | 
|  | 1591 | unsigned long load, min_load = ULONG_MAX; | 
|  | 1592 | int idlest = -1; | 
|  | 1593 | int i; | 
|  | 1594 |  | 
|  | 1595 | /* Traverse only the allowed CPUs */ | 
|  | 1596 | for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { | 
|  | 1597 | load = weighted_cpuload(i); | 
|  | 1598 |  | 
|  | 1599 | if (load < min_load || (load == min_load && i == this_cpu)) { | 
|  | 1600 | min_load = load; | 
|  | 1601 | idlest = i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1602 | } | 
|  | 1603 | } | 
|  | 1604 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1605 | return idlest; | 
|  | 1606 | } | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1607 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1608 | /* | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1609 | * Try and locate an idle CPU in the sched_domain. | 
|  | 1610 | */ | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1611 | static int select_idle_sibling(struct task_struct *p, int target) | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1612 | { | 
|  | 1613 | int cpu = smp_processor_id(); | 
|  | 1614 | int prev_cpu = task_cpu(p); | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1615 | struct sched_domain *sd; | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1616 | int i; | 
|  | 1617 |  | 
|  | 1618 | /* | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1619 | * If the task is going to be woken up on this cpu and if it is | 
|  | 1620 | * already idle, then it is the right target. | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1621 | */ | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1622 | if (target == cpu && idle_cpu(cpu)) | 
|  | 1623 | return cpu; | 
|  | 1624 |  | 
|  | 1625 | /* | 
|  | 1626 | * If the task is going to be woken up on the cpu where it previously | 
|  | 1627 | * ran and if it is currently idle, then it is the right target. | 
|  | 1628 | */ | 
|  | 1629 | if (target == prev_cpu && idle_cpu(prev_cpu)) | 
| Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 1630 | return prev_cpu; | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1631 |  | 
|  | 1632 | /* | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1633 | * Otherwise, iterate the domains and find an eligible idle cpu. | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1634 | */ | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 1635 | rcu_read_lock(); | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1636 | for_each_domain(target, sd) { | 
|  | 1637 | if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) | 
| Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 1638 | break; | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1639 |  | 
|  | 1640 | for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) { | 
|  | 1641 | if (idle_cpu(i)) { | 
|  | 1642 | target = i; | 
|  | 1643 | break; | 
|  | 1644 | } | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1645 | } | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1646 |  | 
|  | 1647 | /* | 
|  | 1648 | * Let's stop looking for an idle sibling once we reach | 
|  | 1649 | * the domain that spans the current cpu and prev_cpu. | 
|  | 1650 | */ | 
|  | 1651 | if (cpumask_test_cpu(cpu, sched_domain_span(sd)) && | 
|  | 1652 | cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) | 
|  | 1653 | break; | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1654 | } | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 1655 | rcu_read_unlock(); | 
| Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 1656 |  | 
|  | 1657 | return target; | 
|  | 1658 | } | 
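
A sketch of the domain walk above over a hypothetical two-level topology: each cache-sharing domain is scanned for an idle, allowed cpu, and the walk stops at the first domain without shared cache or at the first domain that spans both the waking cpu and prev_cpu:

#include <stdbool.h>
#include <stdio.h>

struct domain { int lo, hi; bool shares_cache; };

static bool spans(const struct domain *d, int cpu)
{
	return cpu >= d->lo && cpu <= d->hi;
}

int main(void)
{
	struct domain doms[] = {
		{ 0, 1, true  },	/* SMT siblings of cpu 0 */
		{ 0, 7, true  },	/* cpus sharing the LLC  */
		{ 0, 15, false },	/* whole machine: no shared cache */
	};
	int cpu = 0, prev = 4, level;

	for (level = 0; level < 3; level++) {
		if (!doms[level].shares_cache)
			break;
		/* ... scan doms[level] for an idle, allowed cpu here ... */
		if (spans(&doms[level], cpu) && spans(&doms[level], prev))
			break;		/* widest useful domain reached */
	}
	printf("stopped at level %d\n", level);	/* 1: the LLC domain */
	return 0;
}
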
|  | 1659 |  | 
|  | 1660 | /* | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1661 | * select_task_rq_fair: balance the current task (running on cpu) in domains | 
|  | 1662 | * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, | 
|  | 1663 | * SD_BALANCE_FORK, or SD_BALANCE_EXEC. | 
|  | 1664 | * | 
|  | 1665 | * Balance, i.e. select the least loaded group. | 
|  | 1666 | * | 
|  | 1667 | * Returns the target CPU number, or the same CPU if no balancing is needed. | 
|  | 1668 | * | 
|  | 1669 | * preempt must be disabled. | 
|  | 1670 | */ | 
| Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 1671 | static int | 
| Peter Zijlstra | 7608dec | 2011-04-05 17:23:46 +0200 | [diff] [blame] | 1672 | select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1673 | { | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1674 | struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1675 | int cpu = smp_processor_id(); | 
|  | 1676 | int prev_cpu = task_cpu(p); | 
|  | 1677 | int new_cpu = cpu; | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1678 | int want_affine = 0; | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1679 | int want_sd = 1; | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1680 | int sync = wake_flags & WF_SYNC; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1681 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 1682 | if (sd_flag & SD_BALANCE_WAKE) { | 
| Mike Galbraith | beac4c7 | 2010-03-11 17:17:20 +0100 | [diff] [blame] | 1683 | if (cpumask_test_cpu(cpu, &p->cpus_allowed)) | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1684 | want_affine = 1; | 
|  | 1685 | new_cpu = prev_cpu; | 
|  | 1686 | } | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1687 |  | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 1688 | rcu_read_lock(); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1689 | for_each_domain(cpu, tmp) { | 
| Peter Zijlstra | e4f4288 | 2009-12-16 18:04:34 +0100 | [diff] [blame] | 1690 | if (!(tmp->flags & SD_LOAD_BALANCE)) | 
|  | 1691 | continue; | 
|  | 1692 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1693 | /* | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1694 | * If power savings logic is enabled for a domain, see if we | 
|  | 1695 | * are not overloaded; if so, don't balance wider. | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1696 | */ | 
| Peter Zijlstra | 59abf02 | 2009-09-16 08:28:30 +0200 | [diff] [blame] | 1697 | if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) { | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1698 | unsigned long power = 0; | 
|  | 1699 | unsigned long nr_running = 0; | 
|  | 1700 | unsigned long capacity; | 
|  | 1701 | int i; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1702 |  | 
| Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1703 | for_each_cpu(i, sched_domain_span(tmp)) { | 
|  | 1704 | power += power_of(i); | 
|  | 1705 | nr_running += cpu_rq(i)->cfs.nr_running; | 
|  | 1706 | } | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1707 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 1708 | capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE); | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1709 |  | 
| Peter Zijlstra | 59abf02 | 2009-09-16 08:28:30 +0200 | [diff] [blame] | 1710 | if (tmp->flags & SD_POWERSAVINGS_BALANCE) | 
|  | 1711 | nr_running /= 2; | 
|  | 1712 |  | 
|  | 1713 | if (nr_running < capacity) | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1714 | want_sd = 0; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1715 | } | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1716 |  | 
| Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 1717 | /* | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1718 | * If both cpu and prev_cpu are part of this domain, | 
|  | 1719 | * cpu is a valid SD_WAKE_AFFINE target. | 
| Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 1720 | */ | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1721 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | 
|  | 1722 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | 
|  | 1723 | affine_sd = tmp; | 
|  | 1724 | want_affine = 0; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1725 | } | 
|  | 1726 |  | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1727 | if (!want_sd && !want_affine) | 
|  | 1728 | break; | 
|  | 1729 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 1730 | if (!(tmp->flags & sd_flag)) | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1731 | continue; | 
|  | 1732 |  | 
| Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 1733 | if (want_sd) | 
|  | 1734 | sd = tmp; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1735 | } | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1736 |  | 
| Mike Galbraith | 8b911ac | 2010-03-11 17:17:16 +0100 | [diff] [blame] | 1737 | if (affine_sd) { | 
| Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 1738 | if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 1739 | prev_cpu = cpu; | 
|  | 1740 |  | 
|  | 1741 | new_cpu = select_idle_sibling(p, prev_cpu); | 
|  | 1742 | goto unlock; | 
| Mike Galbraith | 8b911ac | 2010-03-11 17:17:16 +0100 | [diff] [blame] | 1743 | } | 
| Peter Zijlstra | 3b64089 | 2009-09-16 13:44:33 +0200 | [diff] [blame] | 1744 |  | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1745 | while (sd) { | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1746 | int load_idx = sd->forkexec_idx; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1747 | struct sched_group *group; | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1748 | int weight; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1749 |  | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 1750 | if (!(sd->flags & sd_flag)) { | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1751 | sd = sd->child; | 
|  | 1752 | continue; | 
|  | 1753 | } | 
|  | 1754 |  | 
| Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 1755 | if (sd_flag & SD_BALANCE_WAKE) | 
|  | 1756 | load_idx = sd->wake_idx; | 
|  | 1757 |  | 
|  | 1758 | group = find_idlest_group(sd, p, cpu, load_idx); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1759 | if (!group) { | 
|  | 1760 | sd = sd->child; | 
|  | 1761 | continue; | 
|  | 1762 | } | 
|  | 1763 |  | 
| Peter Zijlstra | d7c33c4 | 2009-09-11 12:45:38 +0200 | [diff] [blame] | 1764 | new_cpu = find_idlest_cpu(group, p, cpu); | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1765 | if (new_cpu == -1 || new_cpu == cpu) { | 
|  | 1766 | /* Now try balancing at a lower domain level of cpu */ | 
|  | 1767 | sd = sd->child; | 
|  | 1768 | continue; | 
|  | 1769 | } | 
|  | 1770 |  | 
|  | 1771 | /* Now try balancing at a lower domain level of new_cpu */ | 
|  | 1772 | cpu = new_cpu; | 
| Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 1773 | weight = sd->span_weight; | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1774 | sd = NULL; | 
|  | 1775 | for_each_domain(cpu, tmp) { | 
| Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 1776 | if (weight <= tmp->span_weight) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1777 | break; | 
| Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 1778 | if (tmp->flags & sd_flag) | 
| Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 1779 | sd = tmp; | 
|  | 1780 | } | 
|  | 1781 | /* while loop will break here if sd == NULL */ | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1782 | } | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 1783 | unlock: | 
|  | 1784 | rcu_read_unlock(); | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1785 |  | 
| Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 1786 | return new_cpu; | 
| Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1787 | } | 
|  | 1788 | #endif /* CONFIG_SMP */ | 
|  | 1789 |  | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 1790 | static unsigned long | 
|  | 1791 | wakeup_gran(struct sched_entity *curr, struct sched_entity *se) | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1792 | { | 
|  | 1793 | unsigned long gran = sysctl_sched_wakeup_granularity; | 
|  | 1794 |  | 
|  | 1795 | /* | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 1796 | * Since it is curr that is running now, convert the gran from | 
|  | 1797 | * real-time to virtual-time in its units. | 
| Mike Galbraith | 13814d4 | 2010-03-11 17:17:04 +0100 | [diff] [blame] | 1798 | * | 
|  | 1799 | * By using 'se' instead of 'curr' we penalize light tasks, so | 
|  | 1800 | * they get preempted more easily. That is, if 'se' < 'curr' then | 
|  | 1801 | * the resulting gran will be larger, therefore penalizing the | 
|  | 1802 | * lighter task; if, on the other hand, 'se' > 'curr' then the | 
|  | 1803 | * resulting gran will be smaller, again penalizing the lighter task. | 
|  | 1804 | * | 
|  | 1805 | * This is especially important for buddies when the leftmost | 
|  | 1806 | * task is higher priority than the buddy. | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1807 | */ | 
| Shaohua Li | f4ad9bd | 2011-04-08 12:53:09 +0800 | [diff] [blame] | 1808 | return calc_delta_fair(gran, se); | 
| Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 1809 | } | 
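|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative, userspace-only sketch (not kernel code): models the | 
|  |  | * real-time -> virtual-time conversion that calc_delta_fair() performs | 
|  |  | * above, assuming the nice-0 weight of 1024. model_calc_delta_fair() | 
|  |  | * and the sample weights are hypothetical names/values for illustration. | 
|  |  | */ | 
|  |  | #include <stdio.h> | 
|  |  | #include <stdint.h> | 
|  |  |  | 
|  |  | #define NICE_0_LOAD 1024ULL | 
|  |  |  | 
|  |  | /* vtime delta ~= delta * NICE_0_LOAD / weight: heavier => smaller */ | 
|  |  | static uint64_t model_calc_delta_fair(uint64_t delta, uint64_t weight) | 
|  |  | { | 
|  |  |         return delta * NICE_0_LOAD / weight; | 
|  |  | } | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         uint64_t gran = 1000000; /* 1ms wakeup granularity, in ns */ | 
|  |  |  | 
|  |  |         /* nice 0 (weight 1024): gran unchanged */ | 
|  |  |         printf("nice  0: %llu\n", (unsigned long long)model_calc_delta_fair(gran, 1024)); | 
|  |  |         /* heavier 'se': smaller vgran, so it preempts more easily */ | 
|  |  |         printf("nice -5: %llu\n", (unsigned long long)model_calc_delta_fair(gran, 3121)); | 
|  |  |         /* lighter 'se': larger vgran, penalized as the comment describes */ | 
|  |  |         printf("nice +5: %llu\n", (unsigned long long)model_calc_delta_fair(gran, 335)); | 
|  |  |         return 0; | 
|  |  | } | 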
|  | 1810 |  | 
|  | 1811 | /* | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1812 | * Should 'se' preempt 'curr'? | 
|  | 1813 | * | 
|  | 1814 | *             |s1 | 
|  | 1815 | *        |s2 | 
|  | 1816 | *   |s3 | 
|  | 1817 | *         g | 
|  | 1818 | *      |<--->|c | 
|  | 1819 | * | 
|  | 1820 | *  w(c, s1) = -1 | 
|  | 1821 | *  w(c, s2) =  0 | 
|  | 1822 | *  w(c, s3) =  1 | 
|  | 1823 | * | 
|  | 1824 | */ | 
|  | 1825 | static int | 
|  | 1826 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | 
|  | 1827 | { | 
|  | 1828 | s64 gran, vdiff = curr->vruntime - se->vruntime; | 
|  | 1829 |  | 
|  | 1830 | if (vdiff <= 0) | 
|  | 1831 | return -1; | 
|  | 1832 |  | 
| Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 1833 | gran = wakeup_gran(curr, se); | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1834 | if (vdiff > gran) | 
|  | 1835 | return 1; | 
|  | 1836 |  | 
|  | 1837 | return 0; | 
|  | 1838 | } | 
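|  |  |  | 
|  |  | /* | 
|  |  | * Standalone model of the three-way decision above, matching the | 
|  |  | * s1/s2/s3 picture: vdiff <= 0 -> -1, vdiff > gran -> 1, else 0. | 
|  |  | * A hypothetical sketch for illustration only; the numbers are invented. | 
|  |  | */ | 
|  |  | #include <stdio.h> | 
|  |  | #include <stdint.h> | 
|  |  |  | 
|  |  | static int model_wakeup_preempt(int64_t curr_vrt, int64_t se_vrt, int64_t gran) | 
|  |  | { | 
|  |  |         int64_t vdiff = curr_vrt - se_vrt; | 
|  |  |  | 
|  |  |         if (vdiff <= 0) | 
|  |  |                 return -1;      /* 'se' has at least curr's vruntime: no */ | 
|  |  |         if (vdiff > gran) | 
|  |  |                 return 1;       /* 'se' trails by more than gran: preempt */ | 
|  |  |         return 0;               /* inside the granularity band: leave it */ | 
|  |  | } | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         printf("s1: %d\n", model_wakeup_preempt(100, 500, 1000));  /* -1 */ | 
|  |  |         printf("s2: %d\n", model_wakeup_preempt(500, 100, 1000));  /*  0 */ | 
|  |  |         printf("s3: %d\n", model_wakeup_preempt(5000, 100, 1000)); /*  1 */ | 
|  |  |         return 0; | 
|  |  | } | 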
|  | 1839 |  | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1840 | static void set_last_buddy(struct sched_entity *se) | 
|  | 1841 | { | 
| Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 1842 | if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) | 
|  | 1843 | return; | 
|  | 1844 |  | 
|  | 1845 | for_each_sched_entity(se) | 
|  | 1846 | cfs_rq_of(se)->last = se; | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1847 | } | 
|  | 1848 |  | 
|  | 1849 | static void set_next_buddy(struct sched_entity *se) | 
|  | 1850 | { | 
| Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 1851 | if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) | 
|  | 1852 | return; | 
|  | 1853 |  | 
|  | 1854 | for_each_sched_entity(se) | 
|  | 1855 | cfs_rq_of(se)->next = se; | 
| Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 1856 | } | 
|  | 1857 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1858 | static void set_skip_buddy(struct sched_entity *se) | 
|  | 1859 | { | 
| Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 1860 | for_each_sched_entity(se) | 
|  | 1861 | cfs_rq_of(se)->skip = se; | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1862 | } | 
|  | 1863 |  | 
| Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 1864 | /* | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1865 | * Preempt the current task with a newly woken task if needed: | 
|  | 1866 | */ | 
| Peter Zijlstra | 5a9b86f | 2009-09-16 13:47:58 +0200 | [diff] [blame] | 1867 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1868 | { | 
|  | 1869 | struct task_struct *curr = rq->curr; | 
| Srivatsa Vaddagiri | 8651a86 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 1870 | struct sched_entity *se = &curr->se, *pse = &p->se; | 
| Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 1871 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
| Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1872 | int scale = cfs_rq->nr_running >= sched_nr_latency; | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1873 | int next_buddy_marked = 0; | 
| Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 1874 |  | 
| Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 1875 | if (unlikely(se == pse)) | 
|  | 1876 | return; | 
|  | 1877 |  | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1878 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { | 
| Mike Galbraith | 3cb63d5 | 2009-09-11 12:01:17 +0200 | [diff] [blame] | 1879 | set_next_buddy(pse); | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1880 | next_buddy_marked = 1; | 
|  | 1881 | } | 
| Peter Zijlstra | 57fdc26 | 2008-09-23 15:33:45 +0200 | [diff] [blame] | 1882 |  | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 1883 | /* | 
|  | 1884 | * We can come here with TIF_NEED_RESCHED already set from new task | 
|  | 1885 | * wake up path. | 
|  | 1886 | */ | 
|  | 1887 | if (test_tsk_need_resched(curr)) | 
|  | 1888 | return; | 
|  | 1889 |  | 
| Darren Hart | a2f5c9a | 2011-02-22 13:04:33 -0800 | [diff] [blame] | 1890 | /* Idle tasks are by definition preempted by non-idle tasks. */ | 
|  | 1891 | if (unlikely(curr->policy == SCHED_IDLE) && | 
|  | 1892 | likely(p->policy != SCHED_IDLE)) | 
|  | 1893 | goto preempt; | 
|  | 1894 |  | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1895 | /* | 
| Darren Hart | a2f5c9a | 2011-02-22 13:04:33 -0800 | [diff] [blame] | 1896 | * Batch and idle tasks do not preempt non-idle tasks (their preemption | 
|  | 1897 | * is driven by the tick): | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1898 | */ | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1899 | if (unlikely(p->policy != SCHED_NORMAL)) | 
| Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 1900 | return; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1901 |  | 
| Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 1902 |  | 
| Peter Zijlstra | ad4b78b | 2009-09-16 12:31:31 +0200 | [diff] [blame] | 1903 | if (!sched_feat(WAKEUP_PREEMPT)) | 
|  | 1904 | return; | 
|  | 1905 |  | 
| Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 1906 | find_matching_se(&se, &pse); | 
| Paul Turner | 9bbd737 | 2011-07-05 19:07:21 -0700 | [diff] [blame] | 1907 | update_curr(cfs_rq_of(se)); | 
| Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 1908 | BUG_ON(!pse); | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1909 | if (wakeup_preempt_entity(se, pse) == 1) { | 
|  | 1910 | /* | 
|  | 1911 | * Bias pick_next to pick the sched entity that is | 
|  | 1912 | * triggering this preemption. | 
|  | 1913 | */ | 
|  | 1914 | if (!next_buddy_marked) | 
|  | 1915 | set_next_buddy(pse); | 
| Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 1916 | goto preempt; | 
| Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 1917 | } | 
| Jupyung Lee | a65ac74 | 2009-11-17 18:51:40 +0900 | [diff] [blame] | 1918 |  | 
| Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 1919 | return; | 
|  | 1920 |  | 
|  | 1921 | preempt: | 
|  | 1922 | resched_task(curr); | 
|  | 1923 | /* | 
|  | 1924 | * Only set the backward buddy when the current task is still | 
|  | 1925 | * on the rq. This can happen when a wakeup gets interleaved | 
|  | 1926 | * with schedule on the ->pre_schedule() or idle_balance() | 
|  | 1927 | * point, either of which can drop the rq lock. | 
|  | 1928 | * | 
|  | 1929 | * Also, during early boot the idle thread is in the fair class, | 
|  | 1930 | * for obvious reasons its a bad idea to schedule back to it. | 
|  | 1931 | */ | 
|  | 1932 | if (unlikely(!se->on_rq || curr == rq->idle)) | 
|  | 1933 | return; | 
|  | 1934 |  | 
|  | 1935 | if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) | 
|  | 1936 | set_last_buddy(se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1937 | } | 
|  | 1938 |  | 
| Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1939 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1940 | { | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1941 | struct task_struct *p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1942 | struct cfs_rq *cfs_rq = &rq->cfs; | 
|  | 1943 | struct sched_entity *se; | 
|  | 1944 |  | 
| Tim Blechmann | 36ace27 | 2009-11-24 11:55:45 +0100 | [diff] [blame] | 1945 | if (!cfs_rq->nr_running) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1946 | return NULL; | 
|  | 1947 |  | 
|  | 1948 | do { | 
| Ingo Molnar | 9948f4b | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1949 | se = pick_next_entity(cfs_rq); | 
| Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1950 | set_next_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1951 | cfs_rq = group_cfs_rq(se); | 
|  | 1952 | } while (cfs_rq); | 
|  | 1953 |  | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1954 | p = task_of(se); | 
|  | 1955 | hrtick_start_fair(rq, p); | 
|  | 1956 |  | 
|  | 1957 | return p; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1958 | } | 
|  | 1959 |  | 
|  | 1960 | /* | 
|  | 1961 | * Account for a descheduled task: | 
|  | 1962 | */ | 
| Ingo Molnar | 31ee529 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 1963 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1964 | { | 
|  | 1965 | struct sched_entity *se = &prev->se; | 
|  | 1966 | struct cfs_rq *cfs_rq; | 
|  | 1967 |  | 
|  | 1968 | for_each_sched_entity(se) { | 
|  | 1969 | cfs_rq = cfs_rq_of(se); | 
| Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1970 | put_prev_entity(cfs_rq, se); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1971 | } | 
|  | 1972 | } | 
|  | 1973 |  | 
| Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1974 | /* | 
|  | 1975 | * sched_yield() is very simple | 
|  | 1976 | * | 
|  | 1977 | * The magic of dealing with the ->skip buddy is in pick_next_entity. | 
|  | 1978 | */ | 
|  | 1979 | static void yield_task_fair(struct rq *rq) | 
|  | 1980 | { | 
|  | 1981 | struct task_struct *curr = rq->curr; | 
|  | 1982 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 
|  | 1983 | struct sched_entity *se = &curr->se; | 
|  | 1984 |  | 
|  | 1985 | /* | 
|  | 1986 | * Are we the only task in the tree? | 
|  | 1987 | */ | 
|  | 1988 | if (unlikely(rq->nr_running == 1)) | 
|  | 1989 | return; | 
|  | 1990 |  | 
|  | 1991 | clear_buddies(cfs_rq, se); | 
|  | 1992 |  | 
|  | 1993 | if (curr->policy != SCHED_BATCH) { | 
|  | 1994 | update_rq_clock(rq); | 
|  | 1995 | /* | 
|  | 1996 | * Update run-time statistics of the 'current'. | 
|  | 1997 | */ | 
|  | 1998 | update_curr(cfs_rq); | 
|  | 1999 | } | 
|  | 2000 |  | 
|  | 2001 | set_skip_buddy(se); | 
|  | 2002 | } | 
|  | 2003 |  | 
| Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 2004 | static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) | 
|  | 2005 | { | 
|  | 2006 | struct sched_entity *se = &p->se; | 
|  | 2007 |  | 
|  | 2008 | if (!se->on_rq) | 
|  | 2009 | return false; | 
|  | 2010 |  | 
|  | 2011 | /* Tell the scheduler that we'd really like pse to run next. */ | 
|  | 2012 | set_next_buddy(se); | 
|  | 2013 |  | 
| Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 2014 | yield_task_fair(rq); | 
|  | 2015 |  | 
|  | 2016 | return true; | 
|  | 2017 | } | 
|  | 2018 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 2019 | #ifdef CONFIG_SMP | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2020 | /************************************************** | 
|  | 2021 | * Fair scheduling class load-balancing methods: | 
|  | 2022 | */ | 
|  | 2023 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2024 | /* | 
|  | 2025 | * pull_task - move a task from a remote runqueue to the local runqueue. | 
|  | 2026 | * Both runqueues must be locked. | 
|  | 2027 | */ | 
|  | 2028 | static void pull_task(struct rq *src_rq, struct task_struct *p, | 
|  | 2029 | struct rq *this_rq, int this_cpu) | 
|  | 2030 | { | 
|  | 2031 | deactivate_task(src_rq, p, 0); | 
|  | 2032 | set_task_cpu(p, this_cpu); | 
|  | 2033 | activate_task(this_rq, p, 0); | 
|  | 2034 | check_preempt_curr(this_rq, p, 0); | 
|  | 2035 | } | 
|  | 2036 |  | 
|  | 2037 | /* | 
|  | 2038 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? | 
|  | 2039 | */ | 
|  | 2040 | static | 
|  | 2041 | int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | 
|  | 2042 | struct sched_domain *sd, enum cpu_idle_type idle, | 
|  | 2043 | int *all_pinned) | 
|  | 2044 | { | 
|  | 2045 | int tsk_cache_hot = 0; | 
|  | 2046 | /* | 
|  | 2047 | * We do not migrate tasks that: | 
|  | 2048 | * 1) are running (obviously), or | 
|  | 2049 | * 2) cannot be migrated to this CPU due to cpus_allowed, or | 
|  | 2050 | * 3) are cache-hot on their current CPU. | 
|  | 2051 | */ | 
|  | 2052 | if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2053 | schedstat_inc(p, se.statistics.nr_failed_migrations_affine); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2054 | return 0; | 
|  | 2055 | } | 
|  | 2056 | *all_pinned = 0; | 
|  | 2057 |  | 
|  | 2058 | if (task_running(rq, p)) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2059 | schedstat_inc(p, se.statistics.nr_failed_migrations_running); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2060 | return 0; | 
|  | 2061 | } | 
|  | 2062 |  | 
|  | 2063 | /* | 
|  | 2064 | * Aggressive migration if: | 
|  | 2065 | * 1) task is cache cold, or | 
|  | 2066 | * 2) too many balance attempts have failed. | 
|  | 2067 | */ | 
|  | 2068 |  | 
| Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 2069 | tsk_cache_hot = task_hot(p, rq->clock_task, sd); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2070 | if (!tsk_cache_hot || | 
|  | 2071 | sd->nr_balance_failed > sd->cache_nice_tries) { | 
|  | 2072 | #ifdef CONFIG_SCHEDSTATS | 
|  | 2073 | if (tsk_cache_hot) { | 
|  | 2074 | schedstat_inc(sd, lb_hot_gained[idle]); | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2075 | schedstat_inc(p, se.statistics.nr_forced_migrations); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2076 | } | 
|  | 2077 | #endif | 
|  | 2078 | return 1; | 
|  | 2079 | } | 
|  | 2080 |  | 
|  | 2081 | if (tsk_cache_hot) { | 
| Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2082 | schedstat_inc(p, se.statistics.nr_failed_migrations_hot); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2083 | return 0; | 
|  | 2084 | } | 
|  | 2085 | return 1; | 
|  | 2086 | } | 
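|  |  |  | 
|  |  | /* | 
|  |  | * Hypothetical userspace model of the three filters above (affinity, | 
|  |  | * running, cache-hot). struct model_task, model_can_migrate() and the | 
|  |  | * values used are assumptions for illustration, not kernel API. | 
|  |  | */ | 
|  |  | #include <stdbool.h> | 
|  |  | #include <stdio.h> | 
|  |  |  | 
|  |  | struct model_task { | 
|  |  |         unsigned long allowed_mask;     /* one bit per cpu */ | 
|  |  |         bool running; | 
|  |  |         bool cache_hot; | 
|  |  | }; | 
|  |  |  | 
|  |  | static bool model_can_migrate(const struct model_task *p, int this_cpu, | 
|  |  |                               int nr_balance_failed, int cache_nice_tries) | 
|  |  | { | 
|  |  |         if (!(p->allowed_mask & (1UL << this_cpu))) | 
|  |  |                 return false;   /* pinned away from this_cpu */ | 
|  |  |         if (p->running) | 
|  |  |                 return false;   /* currently on a cpu */ | 
|  |  |         /* cache-hot tasks move only once balancing keeps failing */ | 
|  |  |         if (p->cache_hot && nr_balance_failed <= cache_nice_tries) | 
|  |  |                 return false; | 
|  |  |         return true; | 
|  |  | } | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         struct model_task hot = { .allowed_mask = ~0UL, .cache_hot = true }; | 
|  |  |  | 
|  |  |         printf("hot, 0 failures: %d\n", model_can_migrate(&hot, 2, 0, 3)); /* 0 */ | 
|  |  |         printf("hot, 5 failures: %d\n", model_can_migrate(&hot, 2, 5, 3)); /* 1 */ | 
|  |  |         return 0; | 
|  |  | } | 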
|  | 2087 |  | 
| Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 2088 | /* | 
|  | 2089 | * move_one_task tries to move exactly one task from busiest to this_rq, as | 
|  | 2090 | * part of active balancing operations within "domain". | 
|  | 2091 | * Returns 1 if successful and 0 otherwise. | 
|  | 2092 | * | 
|  | 2093 | * Called with both runqueues locked. | 
|  | 2094 | */ | 
|  | 2095 | static int | 
|  | 2096 | move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 2097 | struct sched_domain *sd, enum cpu_idle_type idle) | 
|  | 2098 | { | 
|  | 2099 | struct task_struct *p, *n; | 
|  | 2100 | struct cfs_rq *cfs_rq; | 
|  | 2101 | int pinned = 0; | 
|  | 2102 |  | 
|  | 2103 | for_each_leaf_cfs_rq(busiest, cfs_rq) { | 
|  | 2104 | list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) { | 
|  | 2105 |  | 
|  | 2106 | if (!can_migrate_task(p, busiest, this_cpu, | 
|  | 2107 | sd, idle, &pinned)) | 
|  | 2108 | continue; | 
|  | 2109 |  | 
|  | 2110 | pull_task(busiest, p, this_rq, this_cpu); | 
|  | 2111 | /* | 
|  | 2112 | * Right now, this is only the second place pull_task() | 
|  | 2113 | * is called, so we can safely collect pull_task() | 
|  | 2114 | * stats here rather than inside pull_task(). | 
|  | 2115 | */ | 
|  | 2116 | schedstat_inc(sd, lb_gained[idle]); | 
|  | 2117 | return 1; | 
|  | 2118 | } | 
|  | 2119 | } | 
|  | 2120 |  | 
|  | 2121 | return 0; | 
|  | 2122 | } | 
|  | 2123 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2124 | static unsigned long | 
|  | 2125 | balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 2126 | unsigned long max_load_move, struct sched_domain *sd, | 
|  | 2127 | enum cpu_idle_type idle, int *all_pinned, | 
| Vladimir Davydov | 931aeed | 2011-05-03 22:31:07 +0400 | [diff] [blame] | 2128 | struct cfs_rq *busiest_cfs_rq) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2129 | { | 
| Ken Chen | b30aef1 | 2011-04-08 12:20:16 -0700 | [diff] [blame] | 2130 | int loops = 0, pulled = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2131 | long rem_load_move = max_load_move; | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 2132 | struct task_struct *p, *n; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2133 |  | 
|  | 2134 | if (max_load_move == 0) | 
|  | 2135 | goto out; | 
|  | 2136 |  | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 2137 | list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { | 
|  | 2138 | if (loops++ > sysctl_sched_nr_migrate) | 
|  | 2139 | break; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2140 |  | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 2141 | if ((p->se.load.weight >> 1) > rem_load_move || | 
| Ken Chen | b30aef1 | 2011-04-08 12:20:16 -0700 | [diff] [blame] | 2142 | !can_migrate_task(p, busiest, this_cpu, sd, idle, | 
|  | 2143 | all_pinned)) | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 2144 | continue; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2145 |  | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 2146 | pull_task(busiest, p, this_rq, this_cpu); | 
|  | 2147 | pulled++; | 
|  | 2148 | rem_load_move -= p->se.load.weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2149 |  | 
|  | 2150 | #ifdef CONFIG_PREEMPT | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 2151 | /* | 
|  | 2152 | * NEWIDLE balancing is a source of latency, so preemptible | 
|  | 2153 | * kernels will stop after the first task is pulled to minimize | 
|  | 2154 | * the critical section. | 
|  | 2155 | */ | 
|  | 2156 | if (idle == CPU_NEWLY_IDLE) | 
|  | 2157 | break; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2158 | #endif | 
|  | 2159 |  | 
| Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 2160 | /* | 
|  | 2161 | * We only want to steal up to the prescribed amount of | 
|  | 2162 | * weighted load. | 
|  | 2163 | */ | 
|  | 2164 | if (rem_load_move <= 0) | 
|  | 2165 | break; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2166 | } | 
|  | 2167 | out: | 
|  | 2168 | /* | 
|  | 2169 | * Right now, this is one of only two places pull_task() is called, | 
|  | 2170 | * so we can safely collect pull_task() stats here rather than | 
|  | 2171 | * inside pull_task(). | 
|  | 2172 | */ | 
|  | 2173 | schedstat_add(sd, lb_gained[idle], pulled); | 
|  | 2174 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2175 | return max_load_move - rem_load_move; | 
|  | 2176 | } | 
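|  |  |  | 
|  |  | /* | 
|  |  | * Minimal model of the rem_load_move budget loop above: keep pulling | 
|  |  | * while budget remains, skipping any task whose half-weight exceeds | 
|  |  | * the remaining budget. The weights below are invented for illustration. | 
|  |  | */ | 
|  |  | #include <stdio.h> | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         long weights[] = { 512, 3072, 1024, 1024 }; | 
|  |  |         long rem = 2048;        /* max_load_move */ | 
|  |  |         long pulled = 0; | 
|  |  |         unsigned int i; | 
|  |  |  | 
|  |  |         for (i = 0; i < sizeof(weights) / sizeof(weights[0]); i++) { | 
|  |  |                 if ((weights[i] >> 1) > rem)    /* too big for budget */ | 
|  |  |                         continue; | 
|  |  |                 rem -= weights[i]; | 
|  |  |                 pulled++; | 
|  |  |                 if (rem <= 0)   /* budget exhausted */ | 
|  |  |                         break; | 
|  |  |         } | 
|  |  |         /* like the kernel check, the last task may overshoot the budget */ | 
|  |  |         printf("pulled %ld, moved %ld of 2048\n", pulled, 2048 - rem); | 
|  |  |         return 0; | 
|  |  | } | 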
|  | 2177 |  | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2178 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 2179 | /* | 
|  | 2180 | * update tg->load_weight by folding this cpu's load_avg | 
|  | 2181 | */ | 
| Paul Turner | 67e8625 | 2010-11-15 15:47:05 -0800 | [diff] [blame] | 2182 | static int update_shares_cpu(struct task_group *tg, int cpu) | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 2183 | { | 
|  | 2184 | struct cfs_rq *cfs_rq; | 
|  | 2185 | unsigned long flags; | 
|  | 2186 | struct rq *rq; | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 2187 |  | 
|  | 2188 | if (!tg->se[cpu]) | 
|  | 2189 | return 0; | 
|  | 2190 |  | 
|  | 2191 | rq = cpu_rq(cpu); | 
|  | 2192 | cfs_rq = tg->cfs_rq[cpu]; | 
|  | 2193 |  | 
|  | 2194 | raw_spin_lock_irqsave(&rq->lock, flags); | 
|  | 2195 |  | 
|  | 2196 | update_rq_clock(rq); | 
| Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 2197 | update_cfs_load(cfs_rq, 1); | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 2198 |  | 
|  | 2199 | /* | 
|  | 2200 | * We need to update shares after updating tg->load_weight in | 
|  | 2201 | * order to adjust the weight of groups with long-running tasks. | 
|  | 2202 | */ | 
| Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 2203 | update_cfs_shares(cfs_rq); | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 2204 |  | 
|  | 2205 | raw_spin_unlock_irqrestore(&rq->lock, flags); | 
|  | 2206 |  | 
|  | 2207 | return 0; | 
|  | 2208 | } | 
|  | 2209 |  | 
|  | 2210 | static void update_shares(int cpu) | 
|  | 2211 | { | 
|  | 2212 | struct cfs_rq *cfs_rq; | 
|  | 2213 | struct rq *rq = cpu_rq(cpu); | 
|  | 2214 |  | 
|  | 2215 | rcu_read_lock(); | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 2216 | /* | 
|  | 2217 | * Iterates the task_group tree in a bottom up fashion, see | 
|  | 2218 | * list_add_leaf_cfs_rq() for details. | 
|  | 2219 | */ | 
| Paul Turner | 67e8625 | 2010-11-15 15:47:05 -0800 | [diff] [blame] | 2220 | for_each_leaf_cfs_rq(rq, cfs_rq) | 
|  | 2221 | update_shares_cpu(cfs_rq->tg, cpu); | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 2222 | rcu_read_unlock(); | 
|  | 2223 | } | 
|  | 2224 |  | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 2225 | /* | 
|  | 2226 | * Compute the cpu's hierarchical load factor for each task group. | 
|  | 2227 | * This needs to be done in a top-down fashion because the load of a child | 
|  | 2228 | * group is a fraction of its parent's load. | 
|  | 2229 | */ | 
|  | 2230 | static int tg_load_down(struct task_group *tg, void *data) | 
|  | 2231 | { | 
|  | 2232 | unsigned long load; | 
|  | 2233 | long cpu = (long)data; | 
|  | 2234 |  | 
|  | 2235 | if (!tg->parent) { | 
|  | 2236 | load = cpu_rq(cpu)->load.weight; | 
|  | 2237 | } else { | 
|  | 2238 | load = tg->parent->cfs_rq[cpu]->h_load; | 
|  | 2239 | load *= tg->se[cpu]->load.weight; | 
|  | 2240 | load /= tg->parent->cfs_rq[cpu]->load.weight + 1; | 
|  | 2241 | } | 
|  | 2242 |  | 
|  | 2243 | tg->cfs_rq[cpu]->h_load = load; | 
|  | 2244 |  | 
|  | 2245 | return 0; | 
|  | 2246 | } | 
|  | 2247 |  | 
|  | 2248 | static void update_h_load(long cpu) | 
|  | 2249 | { | 
|  | 2250 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); | 
|  | 2251 | } | 
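|  |  |  | 
|  |  | /* | 
|  |  | * Worked userspace example of the top-down recursion above: | 
|  |  | *   h_load(child) = h_load(parent) * se_weight / (parent_load + 1) | 
|  |  | * The two-level numbers below are invented for illustration. | 
|  |  | */ | 
|  |  | #include <stdio.h> | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         unsigned long rq_load = 3072;   /* cpu_rq(cpu)->load.weight */ | 
|  |  |         unsigned long h_load = rq_load; /* root group: whole rq load */ | 
|  |  |  | 
|  |  |         /* a child group entity weighing 1024 of the root's 3072 */ | 
|  |  |         h_load = h_load * 1024 / (rq_load + 1); | 
|  |  |         printf("level 1 h_load = %lu\n", h_load);       /* ~1024, 1/3 */ | 
|  |  |  | 
|  |  |         /* a grandchild weighing 512 of its parent rq's 1024 */ | 
|  |  |         h_load = h_load * 512 / (1024 + 1); | 
|  |  |         printf("level 2 h_load = %lu\n", h_load);       /* ~512, 1/6 */ | 
|  |  |         return 0; | 
|  |  | } | 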
|  | 2252 |  | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2253 | static unsigned long | 
|  | 2254 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 2255 | unsigned long max_load_move, | 
|  | 2256 | struct sched_domain *sd, enum cpu_idle_type idle, | 
| Vladimir Davydov | 931aeed | 2011-05-03 22:31:07 +0400 | [diff] [blame] | 2257 | int *all_pinned) | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2258 | { | 
|  | 2259 | long rem_load_move = max_load_move; | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 2260 | struct cfs_rq *busiest_cfs_rq; | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2261 |  | 
|  | 2262 | rcu_read_lock(); | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 2263 | update_h_load(cpu_of(busiest)); | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2264 |  | 
| Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 2265 | for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) { | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2266 | unsigned long busiest_h_load = busiest_cfs_rq->h_load; | 
|  | 2267 | unsigned long busiest_weight = busiest_cfs_rq->load.weight; | 
|  | 2268 | u64 rem_load, moved_load; | 
|  | 2269 |  | 
|  | 2270 | /* | 
|  | 2271 | * Skip empty groups. | 
|  | 2272 | */ | 
|  | 2273 | if (!busiest_cfs_rq->task_weight) | 
|  | 2274 | continue; | 
|  | 2275 |  | 
|  | 2276 | rem_load = (u64)rem_load_move * busiest_weight; | 
|  | 2277 | rem_load = div_u64(rem_load, busiest_h_load + 1); | 
|  | 2278 |  | 
|  | 2279 | moved_load = balance_tasks(this_rq, this_cpu, busiest, | 
| Vladimir Davydov | 931aeed | 2011-05-03 22:31:07 +0400 | [diff] [blame] | 2280 | rem_load, sd, idle, all_pinned, | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2281 | busiest_cfs_rq); | 
|  | 2282 |  | 
|  | 2283 | if (!moved_load) | 
|  | 2284 | continue; | 
|  | 2285 |  | 
|  | 2286 | moved_load *= busiest_h_load; | 
|  | 2287 | moved_load = div_u64(moved_load, busiest_weight + 1); | 
|  | 2288 |  | 
|  | 2289 | rem_load_move -= moved_load; | 
|  | 2290 | if (rem_load_move < 0) | 
|  | 2291 | break; | 
|  | 2292 | } | 
|  | 2293 | rcu_read_unlock(); | 
|  | 2294 |  | 
|  | 2295 | return max_load_move - rem_load_move; | 
|  | 2296 | } | 
|  | 2297 | #else | 
| Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 2298 | static inline void update_shares(int cpu) | 
|  | 2299 | { | 
|  | 2300 | } | 
|  | 2301 |  | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2302 | static unsigned long | 
|  | 2303 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 2304 | unsigned long max_load_move, | 
|  | 2305 | struct sched_domain *sd, enum cpu_idle_type idle, | 
| Vladimir Davydov | 931aeed | 2011-05-03 22:31:07 +0400 | [diff] [blame] | 2306 | int *all_pinned) | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2307 | { | 
|  | 2308 | return balance_tasks(this_rq, this_cpu, busiest, | 
|  | 2309 | max_load_move, sd, idle, all_pinned, | 
| Vladimir Davydov | 931aeed | 2011-05-03 22:31:07 +0400 | [diff] [blame] | 2310 | &busiest->cfs); | 
| Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 2311 | } | 
|  | 2312 | #endif | 
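|  |  |  | 
|  |  | /* | 
|  |  | * Sketch of the budget conversion done by the group-scheduling variant | 
|  |  | * of load_balance_fair() above: the global budget is in h_load units, | 
|  |  | * each cfs_rq balances in its own load units, so the budget is scaled | 
|  |  | * in and the moved load scaled back out. All values are invented. | 
|  |  | */ | 
|  |  | #include <stdio.h> | 
|  |  | #include <stdint.h> | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         uint64_t rem_load_move = 512;   /* budget, h_load units */ | 
|  |  |         uint64_t busiest_weight = 2048; /* group's own load.weight */ | 
|  |  |         uint64_t busiest_h_load = 1024; /* its share of cpu load */ | 
|  |  |  | 
|  |  |         /* convert the budget into the group's units */ | 
|  |  |         uint64_t rem_load = rem_load_move * busiest_weight / (busiest_h_load + 1); | 
|  |  |         /* pretend balance_tasks() moved half of that */ | 
|  |  |         uint64_t moved = rem_load / 2; | 
|  |  |         /* convert the moved amount back into h_load units */ | 
|  |  |         uint64_t moved_h = moved * busiest_h_load / (busiest_weight + 1); | 
|  |  |  | 
|  |  |         printf("rem_load=%llu moved=%llu moved_h=%llu\n", | 
|  |  |                (unsigned long long)rem_load, | 
|  |  |                (unsigned long long)moved, | 
|  |  |                (unsigned long long)moved_h);    /* ~1022, 511, ~255 */ | 
|  |  |         return 0; | 
|  |  | } | 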
|  | 2313 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2314 | /* | 
|  | 2315 | * move_tasks tries to move up to max_load_move weighted load from busiest to | 
|  | 2316 | * this_rq, as part of a balancing operation within domain "sd". | 
|  | 2317 | * Returns 1 if successful and 0 otherwise. | 
|  | 2318 | * | 
|  | 2319 | * Called with both runqueues locked. | 
|  | 2320 | */ | 
|  | 2321 | static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | 
|  | 2322 | unsigned long max_load_move, | 
|  | 2323 | struct sched_domain *sd, enum cpu_idle_type idle, | 
|  | 2324 | int *all_pinned) | 
|  | 2325 | { | 
| Peter Zijlstra | 3d45fd8 | 2009-12-17 17:12:46 +0100 | [diff] [blame] | 2326 | unsigned long total_load_moved = 0, load_moved; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2327 |  | 
|  | 2328 | do { | 
| Peter Zijlstra | 3d45fd8 | 2009-12-17 17:12:46 +0100 | [diff] [blame] | 2329 | load_moved = load_balance_fair(this_rq, this_cpu, busiest, | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2330 | max_load_move - total_load_moved, | 
| Vladimir Davydov | 931aeed | 2011-05-03 22:31:07 +0400 | [diff] [blame] | 2331 | sd, idle, all_pinned); | 
| Peter Zijlstra | 3d45fd8 | 2009-12-17 17:12:46 +0100 | [diff] [blame] | 2332 |  | 
|  | 2333 | total_load_moved += load_moved; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2334 |  | 
|  | 2335 | #ifdef CONFIG_PREEMPT | 
|  | 2336 | /* | 
|  | 2337 | * NEWIDLE balancing is a source of latency, so preemptible | 
|  | 2338 | * kernels will stop after the first task is pulled to minimize | 
|  | 2339 | * the critical section. | 
|  | 2340 | */ | 
|  | 2341 | if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) | 
|  | 2342 | break; | 
| Peter Zijlstra | baa8c11 | 2009-12-17 18:10:09 +0100 | [diff] [blame] | 2343 |  | 
|  | 2344 | if (raw_spin_is_contended(&this_rq->lock) || | 
|  | 2345 | raw_spin_is_contended(&busiest->lock)) | 
|  | 2346 | break; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2347 | #endif | 
| Peter Zijlstra | 3d45fd8 | 2009-12-17 17:12:46 +0100 | [diff] [blame] | 2348 | } while (load_moved && max_load_move > total_load_moved); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2349 |  | 
|  | 2350 | return total_load_moved > 0; | 
|  | 2351 | } | 
|  | 2352 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2353 | /********** Helpers for find_busiest_group ************************/ | 
|  | 2354 | /* | 
|  | 2355 | * sd_lb_stats - Structure to store the statistics of a sched_domain | 
|  | 2356 | * 		during load balancing. | 
|  | 2357 | */ | 
|  | 2358 | struct sd_lb_stats { | 
|  | 2359 | struct sched_group *busiest; /* Busiest group in this sd */ | 
|  | 2360 | struct sched_group *this;  /* Local group in this sd */ | 
|  | 2361 | unsigned long total_load;  /* Total load of all groups in sd */ | 
|  | 2362 | unsigned long total_pwr;   /* Total power of all groups in sd */ | 
|  | 2363 | unsigned long avg_load;	   /* Average load across all groups in sd */ | 
|  | 2364 |  | 
|  | 2365 | /** Statistics of this group */ | 
|  | 2366 | unsigned long this_load; | 
|  | 2367 | unsigned long this_load_per_task; | 
|  | 2368 | unsigned long this_nr_running; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 2369 | unsigned long this_has_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2370 | unsigned int  this_idle_cpus; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2371 |  | 
|  | 2372 | /* Statistics of the busiest group */ | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2373 | unsigned int  busiest_idle_cpus; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2374 | unsigned long max_load; | 
|  | 2375 | unsigned long busiest_load_per_task; | 
|  | 2376 | unsigned long busiest_nr_running; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 2377 | unsigned long busiest_group_capacity; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 2378 | unsigned long busiest_has_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2379 | unsigned int  busiest_group_weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2380 |  | 
|  | 2381 | int group_imb; /* Is there imbalance in this sd */ | 
|  | 2382 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 
|  | 2383 | int power_savings_balance; /* Is powersave balance needed for this sd */ | 
|  | 2384 | struct sched_group *group_min; /* Least loaded group in sd */ | 
|  | 2385 | struct sched_group *group_leader; /* Group which relieves group_min */ | 
|  | 2386 | unsigned long min_load_per_task; /* load_per_task in group_min */ | 
|  | 2387 | unsigned long leader_nr_running; /* Nr running of group_leader */ | 
|  | 2388 | unsigned long min_nr_running; /* Nr running of group_min */ | 
|  | 2389 | #endif | 
|  | 2390 | }; | 
|  | 2391 |  | 
|  | 2392 | /* | 
|  | 2393 | * sg_lb_stats - stats of a sched_group required for load_balancing | 
|  | 2394 | */ | 
|  | 2395 | struct sg_lb_stats { | 
|  | 2396 | unsigned long avg_load; /* Avg load across the CPUs of the group */ | 
|  | 2397 | unsigned long group_load; /* Total load over the CPUs of the group */ | 
|  | 2398 | unsigned long sum_nr_running; /* Nr tasks running in the group */ | 
|  | 2399 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ | 
|  | 2400 | unsigned long group_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2401 | unsigned long idle_cpus; | 
|  | 2402 | unsigned long group_weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2403 | int group_imb; /* Is there an imbalance in the group ? */ | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 2404 | int group_has_capacity; /* Is there extra capacity in the group? */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2405 | }; | 
|  | 2406 |  | 
|  | 2407 | /** | 
|  | 2408 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. | 
|  | 2409 | * @group: The group whose first cpu is to be returned. | 
|  | 2410 | */ | 
|  | 2411 | static inline unsigned int group_first_cpu(struct sched_group *group) | 
|  | 2412 | { | 
|  | 2413 | return cpumask_first(sched_group_cpus(group)); | 
|  | 2414 | } | 
|  | 2415 |  | 
|  | 2416 | /** | 
|  | 2417 | * get_sd_load_idx - Obtain the load index for a given sched domain. | 
|  | 2418 | * @sd: The sched_domain whose load_idx is to be obtained. | 
|  | 2419 | * @idle: The idle status of the CPU whose sd's load_idx is obtained. | 
|  | 2420 | */ | 
|  | 2421 | static inline int get_sd_load_idx(struct sched_domain *sd, | 
|  | 2422 | enum cpu_idle_type idle) | 
|  | 2423 | { | 
|  | 2424 | int load_idx; | 
|  | 2425 |  | 
|  | 2426 | switch (idle) { | 
|  | 2427 | case CPU_NOT_IDLE: | 
|  | 2428 | load_idx = sd->busy_idx; | 
|  | 2429 | break; | 
|  | 2430 |  | 
|  | 2431 | case CPU_NEWLY_IDLE: | 
|  | 2432 | load_idx = sd->newidle_idx; | 
|  | 2433 | break; | 
|  | 2434 | default: | 
|  | 2435 | load_idx = sd->idle_idx; | 
|  | 2436 | break; | 
|  | 2437 | } | 
|  | 2438 |  | 
|  | 2439 | return load_idx; | 
|  | 2440 | } | 
|  | 2441 |  | 
|  | 2443 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 
|  | 2444 | /** | 
|  | 2445 | * init_sd_power_savings_stats - Initialize power savings statistics for | 
|  | 2446 | * the given sched_domain, during load balancing. | 
|  | 2447 | * | 
|  | 2448 | * @sd: Sched domain whose power-savings statistics are to be initialized. | 
|  | 2449 | * @sds: Variable containing the statistics for sd. | 
|  | 2450 | * @idle: Idle status of the CPU at which we're performing load-balancing. | 
|  | 2451 | */ | 
|  | 2452 | static inline void init_sd_power_savings_stats(struct sched_domain *sd, | 
|  | 2453 | struct sd_lb_stats *sds, enum cpu_idle_type idle) | 
|  | 2454 | { | 
|  | 2455 | /* | 
|  | 2456 | * Busy processors will not participate in power savings | 
|  | 2457 | * balance. | 
|  | 2458 | */ | 
|  | 2459 | if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) | 
|  | 2460 | sds->power_savings_balance = 0; | 
|  | 2461 | else { | 
|  | 2462 | sds->power_savings_balance = 1; | 
|  | 2463 | sds->min_nr_running = ULONG_MAX; | 
|  | 2464 | sds->leader_nr_running = 0; | 
|  | 2465 | } | 
|  | 2466 | } | 
|  | 2467 |  | 
|  | 2468 | /** | 
|  | 2469 | * update_sd_power_savings_stats - Update the power saving stats for a | 
|  | 2470 | * sched_domain while performing load balancing. | 
|  | 2471 | * | 
|  | 2472 | * @group: sched_group belonging to the sched_domain under consideration. | 
|  | 2473 | * @sds: Variable containing the statistics of the sched_domain | 
|  | 2474 | * @local_group: Does group contain the CPU for which we're performing | 
|  | 2475 | * 		load balancing? | 
|  | 2476 | * @sgs: Variable containing the statistics of the group. | 
|  | 2477 | */ | 
|  | 2478 | static inline void update_sd_power_savings_stats(struct sched_group *group, | 
|  | 2479 | struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs) | 
|  | 2480 | { | 
|  | 2481 |  | 
|  | 2482 | if (!sds->power_savings_balance) | 
|  | 2483 | return; | 
|  | 2484 |  | 
|  | 2485 | /* | 
|  | 2486 | * If the local group is idle or completely loaded, | 
|  | 2487 | * there is no need to do power-savings balance at this domain. | 
|  | 2488 | */ | 
|  | 2489 | if (local_group && (sds->this_nr_running >= sgs->group_capacity || | 
|  | 2490 | !sds->this_nr_running)) | 
|  | 2491 | sds->power_savings_balance = 0; | 
|  | 2492 |  | 
|  | 2493 | /* | 
|  | 2494 | * If a group is already running at full capacity or idle, | 
|  | 2495 | * don't include that group in power savings calculations | 
|  | 2496 | */ | 
|  | 2497 | if (!sds->power_savings_balance || | 
|  | 2498 | sgs->sum_nr_running >= sgs->group_capacity || | 
|  | 2499 | !sgs->sum_nr_running) | 
|  | 2500 | return; | 
|  | 2501 |  | 
|  | 2502 | /* | 
|  | 2503 | * Calculate the group which has the least non-idle load. | 
|  | 2504 | * This is the group from which we need to pick up load | 
|  | 2505 | * in order to save power. | 
|  | 2506 | */ | 
|  | 2507 | if ((sgs->sum_nr_running < sds->min_nr_running) || | 
|  | 2508 | (sgs->sum_nr_running == sds->min_nr_running && | 
|  | 2509 | group_first_cpu(group) > group_first_cpu(sds->group_min))) { | 
|  | 2510 | sds->group_min = group; | 
|  | 2511 | sds->min_nr_running = sgs->sum_nr_running; | 
|  | 2512 | sds->min_load_per_task = sgs->sum_weighted_load / | 
|  | 2513 | sgs->sum_nr_running; | 
|  | 2514 | } | 
|  | 2515 |  | 
|  | 2516 | /* | 
|  | 2517 | * Calculate the group which is near its | 
|  | 2518 | * capacity but still has some room to pick up load | 
|  | 2519 | * from another group and save more power. | 
|  | 2520 | */ | 
|  | 2521 | if (sgs->sum_nr_running + 1 > sgs->group_capacity) | 
|  | 2522 | return; | 
|  | 2523 |  | 
|  | 2524 | if (sgs->sum_nr_running > sds->leader_nr_running || | 
|  | 2525 | (sgs->sum_nr_running == sds->leader_nr_running && | 
|  | 2526 | group_first_cpu(group) < group_first_cpu(sds->group_leader))) { | 
|  | 2527 | sds->group_leader = group; | 
|  | 2528 | sds->leader_nr_running = sgs->sum_nr_running; | 
|  | 2529 | } | 
|  | 2530 | } | 
|  | 2531 |  | 
|  | 2532 | /** | 
|  | 2533 | * check_power_save_busiest_group - see if there is potential for some power-savings balance | 
|  | 2534 | * @sds: Variable containing the statistics of the sched_domain | 
|  | 2535 | *	under consideration. | 
|  | 2536 | * @this_cpu: Cpu at which we're currently performing load-balancing. | 
|  | 2537 | * @imbalance: Variable to store the imbalance. | 
|  | 2538 | * | 
|  | 2539 | * Description: | 
|  | 2540 | * Check if we have potential to perform some power-savings balance. | 
|  | 2541 | * If yes, set the busiest group to be the least loaded group in the | 
|  | 2542 | * sched_domain, so that its CPUs can be put to idle. | 
|  | 2543 | * | 
|  | 2544 | * Returns 1 if there is potential to perform power-savings balance. | 
|  | 2545 | * Else returns 0. | 
|  | 2546 | */ | 
|  | 2547 | static inline int check_power_save_busiest_group(struct sd_lb_stats *sds, | 
|  | 2548 | int this_cpu, unsigned long *imbalance) | 
|  | 2549 | { | 
|  | 2550 | if (!sds->power_savings_balance) | 
|  | 2551 | return 0; | 
|  | 2552 |  | 
|  | 2553 | if (sds->this != sds->group_leader || | 
|  | 2554 | sds->group_leader == sds->group_min) | 
|  | 2555 | return 0; | 
|  | 2556 |  | 
|  | 2557 | *imbalance = sds->min_load_per_task; | 
|  | 2558 | sds->busiest = sds->group_min; | 
|  | 2559 |  | 
|  | 2560 | return 1; | 
|  | 2561 |  | 
|  | 2562 | } | 
|  | 2563 | #else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | 
|  | 2564 | static inline void init_sd_power_savings_stats(struct sched_domain *sd, | 
|  | 2565 | struct sd_lb_stats *sds, enum cpu_idle_type idle) | 
|  | 2566 | { | 
|  | 2567 | return; | 
|  | 2568 | } | 
|  | 2569 |  | 
|  | 2570 | static inline void update_sd_power_savings_stats(struct sched_group *group, | 
|  | 2571 | struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs) | 
|  | 2572 | { | 
|  | 2573 | return; | 
|  | 2574 | } | 
|  | 2575 |  | 
|  | 2576 | static inline int check_power_save_busiest_group(struct sd_lb_stats *sds, | 
|  | 2577 | int this_cpu, unsigned long *imbalance) | 
|  | 2578 | { | 
|  | 2579 | return 0; | 
|  | 2580 | } | 
|  | 2581 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | 
|  | 2582 |  | 
|  | 2584 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu) | 
|  | 2585 | { | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2586 | return SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2587 | } | 
|  | 2588 |  | 
|  | 2589 | unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu) | 
|  | 2590 | { | 
|  | 2591 | return default_scale_freq_power(sd, cpu); | 
|  | 2592 | } | 
|  | 2593 |  | 
|  | 2594 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) | 
|  | 2595 | { | 
| Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 2596 | unsigned long weight = sd->span_weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2597 | unsigned long smt_gain = sd->smt_gain; | 
|  | 2598 |  | 
|  | 2599 | smt_gain /= weight; | 
|  | 2600 |  | 
|  | 2601 | return smt_gain; | 
|  | 2602 | } | 
|  | 2603 |  | 
|  | 2604 | unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) | 
|  | 2605 | { | 
|  | 2606 | return default_scale_smt_power(sd, cpu); | 
|  | 2607 | } | 
|  | 2608 |  | 
|  | 2609 | unsigned long scale_rt_power(int cpu) | 
|  | 2610 | { | 
|  | 2611 | struct rq *rq = cpu_rq(cpu); | 
|  | 2612 | u64 total, available; | 
|  | 2613 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2614 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | 
| Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 2615 |  | 
|  | 2616 | if (unlikely(total < rq->rt_avg)) { | 
|  | 2617 | /* Ensures that power won't end up being negative */ | 
|  | 2618 | available = 0; | 
|  | 2619 | } else { | 
|  | 2620 | available = total - rq->rt_avg; | 
|  | 2621 | } | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2622 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2623 | if (unlikely((s64)total < SCHED_POWER_SCALE)) | 
|  | 2624 | total = SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2625 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2626 | total >>= SCHED_POWER_SHIFT; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2627 |  | 
|  | 2628 | return div_u64(available, total); | 
|  | 2629 | } | 
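|  |  |  | 
|  |  | /* | 
|  |  | * Userspace model of the ratio above: the fraction of the averaging | 
|  |  | * period left over after RT time, scaled so that SCHED_POWER_SCALE | 
|  |  | * (assumed to be 1024 here) means "all of it". Sample times are invented. | 
|  |  | */ | 
|  |  | #include <stdio.h> | 
|  |  | #include <stdint.h> | 
|  |  |  | 
|  |  | #define MODEL_POWER_SCALE 1024ULL | 
|  |  | #define MODEL_POWER_SHIFT 10 | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         uint64_t total = 1000000000;    /* 1s averaging window, ns */ | 
|  |  |         uint64_t rt_avg = 250000000;    /* 250ms spent running RT */ | 
|  |  |         uint64_t available = total - rt_avg; | 
|  |  |  | 
|  |  |         total >>= MODEL_POWER_SHIFT;    /* pre-scale the divisor */ | 
|  |  |         printf("rt scale = %llu / %llu\n", | 
|  |  |                (unsigned long long)(available / total), | 
|  |  |                (unsigned long long)MODEL_POWER_SCALE); /* 768, i.e. 75% */ | 
|  |  |         return 0; | 
|  |  | } | 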
|  | 2630 |  | 
|  | 2631 | static void update_cpu_power(struct sched_domain *sd, int cpu) | 
|  | 2632 | { | 
| Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 2633 | unsigned long weight = sd->span_weight; | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2634 | unsigned long power = SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2635 | struct sched_group *sdg = sd->groups; | 
|  | 2636 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2637 | if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { | 
|  | 2638 | if (sched_feat(ARCH_POWER)) | 
|  | 2639 | power *= arch_scale_smt_power(sd, cpu); | 
|  | 2640 | else | 
|  | 2641 | power *= default_scale_smt_power(sd, cpu); | 
|  | 2642 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2643 | power >>= SCHED_POWER_SHIFT; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2644 | } | 
|  | 2645 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2646 | sdg->sgp->power_orig = power; | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2647 |  | 
|  | 2648 | if (sched_feat(ARCH_POWER)) | 
|  | 2649 | power *= arch_scale_freq_power(sd, cpu); | 
|  | 2650 | else | 
|  | 2651 | power *= default_scale_freq_power(sd, cpu); | 
|  | 2652 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2653 | power >>= SCHED_POWER_SHIFT; | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2654 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2655 | power *= scale_rt_power(cpu); | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2656 | power >>= SCHED_POWER_SHIFT; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2657 |  | 
|  | 2658 | if (!power) | 
|  | 2659 | power = 1; | 
|  | 2660 |  | 
| Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 2661 | cpu_rq(cpu)->cpu_power = power; | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2662 | sdg->sgp->power = power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2663 | } | 
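|  |  |  | 
|  |  | /* | 
|  |  | * Sketch of the cumulative fixed-point scaling above: each factor is | 
|  |  | * expressed relative to SCHED_POWER_SCALE (assumed 1024) and folded in | 
|  |  | * by a multiply-and-shift. The factor values below are invented. | 
|  |  | */ | 
|  |  | #include <stdio.h> | 
|  |  |  | 
|  |  | #define MODEL_POWER_SCALE 1024UL | 
|  |  | #define MODEL_POWER_SHIFT 10 | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         unsigned long power = MODEL_POWER_SCALE; | 
|  |  |         unsigned long smt  = 589;       /* e.g. smt_gain 1178 / 2 siblings */ | 
|  |  |         unsigned long freq = 1024;      /* running at full frequency */ | 
|  |  |         unsigned long rt   = 768;       /* 75% left after RT time */ | 
|  |  |  | 
|  |  |         power = (power * smt)  >> MODEL_POWER_SHIFT; | 
|  |  |         power = (power * freq) >> MODEL_POWER_SHIFT; | 
|  |  |         power = (power * rt)   >> MODEL_POWER_SHIFT; | 
|  |  |         if (!power) | 
|  |  |                 power = 1;      /* never let cpu_power reach zero */ | 
|  |  |  | 
|  |  |         printf("cpu_power = %lu\n", power);     /* (589 * 768) >> 10 = 441 */ | 
|  |  |         return 0; | 
|  |  | } | 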
|  | 2664 |  | 
|  | 2665 | static void update_group_power(struct sched_domain *sd, int cpu) | 
|  | 2666 | { | 
|  | 2667 | struct sched_domain *child = sd->child; | 
|  | 2668 | struct sched_group *group, *sdg = sd->groups; | 
|  | 2669 | unsigned long power; | 
|  | 2670 |  | 
|  | 2671 | if (!child) { | 
|  | 2672 | update_cpu_power(sd, cpu); | 
|  | 2673 | return; | 
|  | 2674 | } | 
|  | 2675 |  | 
|  | 2676 | power = 0; | 
|  | 2677 |  | 
|  | 2678 | group = child->groups; | 
|  | 2679 | do { | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2680 | power += group->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2681 | group = group->next; | 
|  | 2682 | } while (group != child->groups); | 
|  | 2683 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2684 | sdg->sgp->power = power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2685 | } | 
|  | 2686 |  | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2687 | /* | 
|  | 2688 | * Try to fix up capacity for tiny siblings; this is needed when | 
|  | 2689 | * things like SD_ASYM_PACKING need f_b_g to select another sibling | 
|  | 2690 | * which on its own isn't powerful enough. | 
|  | 2691 | * | 
|  | 2692 | * See update_sd_pick_busiest() and check_asym_packing(). | 
|  | 2693 | */ | 
|  | 2694 | static inline int | 
|  | 2695 | fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | 
|  | 2696 | { | 
|  | 2697 | /* | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2698 | * Only siblings can have cpu_power significantly less than SCHED_POWER_SCALE | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2699 | */ | 
| Peter Zijlstra | a6c75f2 | 2011-04-07 14:09:52 +0200 | [diff] [blame] | 2700 | if (!(sd->flags & SD_SHARE_CPUPOWER)) | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2701 | return 0; | 
|  | 2702 |  | 
|  | 2703 | /* | 
|  | 2704 | * If ~90% of the cpu_power is still there, we're good. | 
|  | 2705 | */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2706 | if (group->sgp->power * 32 > group->sgp->power_orig * 29) | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2707 | return 1; | 
|  | 2708 |  | 
|  | 2709 | return 0; | 
|  | 2710 | } | 
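|  |  |  | 
|  |  | /* | 
|  |  | * The 32/29 comparison above is an integer form of | 
|  |  | * power / power_orig > 29/32 (~90.6%); a quick standalone check | 
|  |  | * with invented sample values: | 
|  |  | */ | 
|  |  | #include <stdio.h> | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  |         unsigned long power_orig = 1024; | 
|  |  |         unsigned long powers[] = { 1024, 940, 920 }; | 
|  |  |         unsigned int i; | 
|  |  |  | 
|  |  |         for (i = 0; i < 3; i++) | 
|  |  |                 printf("power %4lu: %s\n", powers[i], | 
|  |  |                        powers[i] * 32 > power_orig * 29 ? "ok" : "degraded"); | 
|  |  |         /* the cutoff is 29/32 * 1024 = 928: 940 passes, 920 does not */ | 
|  |  |         return 0; | 
|  |  | } | 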
|  | 2711 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2712 | /** | 
|  | 2713 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. | 
|  | 2714 | * @sd: The sched_domain whose statistics are to be updated. | 
|  | 2715 | * @group: sched_group whose statistics are to be updated. | 
|  | 2716 | * @this_cpu: Cpu for which load balance is currently performed. | 
|  | 2717 | * @idle: Idle status of this_cpu | 
|  | 2718 | * @load_idx: Load index of sched_domain of this_cpu for load calc. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2719 | * @local_group: Does group contain this_cpu. | 
|  | 2720 | * @cpus: Set of cpus considered for load balancing. | 
|  | 2721 | * @balance: Should we balance. | 
|  | 2722 | * @sgs: variable to hold the statistics for this group. | 
|  | 2723 | */ | 
|  | 2724 | static inline void update_sg_lb_stats(struct sched_domain *sd, | 
|  | 2725 | struct sched_group *group, int this_cpu, | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 2726 | enum cpu_idle_type idle, int load_idx, | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2727 | int local_group, const struct cpumask *cpus, | 
|  | 2728 | int *balance, struct sg_lb_stats *sgs) | 
|  | 2729 | { | 
| Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 2730 | unsigned long load, max_cpu_load, min_cpu_load, max_nr_running; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2731 | int i; | 
|  | 2732 | unsigned int balance_cpu = -1, first_idle_cpu = 0; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 2733 | unsigned long avg_load_per_task = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2734 |  | 
| Gautham R Shenoy | 871e35b | 2010-01-20 14:02:44 -0600 | [diff] [blame] | 2735 | if (local_group) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2736 | balance_cpu = group_first_cpu(group); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2737 |  | 
|  | 2738 | /* Tally up the load of all CPUs in the group */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2739 | max_cpu_load = 0; | 
|  | 2740 | min_cpu_load = ~0UL; | 
| Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 2741 | max_nr_running = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2742 |  | 
|  | 2743 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { | 
|  | 2744 | struct rq *rq = cpu_rq(i); | 
|  | 2745 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2746 | /* Bias balancing toward cpus of our domain */ | 
|  | 2747 | if (local_group) { | 
|  | 2748 | if (idle_cpu(i) && !first_idle_cpu) { | 
|  | 2749 | first_idle_cpu = 1; | 
|  | 2750 | balance_cpu = i; | 
|  | 2751 | } | 
|  | 2752 |  | 
|  | 2753 | load = target_load(i, load_idx); | 
|  | 2754 | } else { | 
|  | 2755 | load = source_load(i, load_idx); | 
| Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 2756 | if (load > max_cpu_load) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2757 | max_cpu_load = load; | 
| Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 2758 | max_nr_running = rq->nr_running; | 
|  | 2759 | } | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2760 | if (min_cpu_load > load) | 
|  | 2761 | min_cpu_load = load; | 
|  | 2762 | } | 
|  | 2763 |  | 
|  | 2764 | sgs->group_load += load; | 
|  | 2765 | sgs->sum_nr_running += rq->nr_running; | 
|  | 2766 | sgs->sum_weighted_load += weighted_cpuload(i); | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2767 | if (idle_cpu(i)) | 
|  | 2768 | sgs->idle_cpus++; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2769 | } | 
|  | 2770 |  | 
|  | 2771 | /* | 
|  | 2772 | * The first idle cpu, or the first cpu (busiest) in this sched group, | 
|  | 2773 | * is eligible for doing load balancing at this and above | 
|  | 2774 | * domains. In the newly idle case, we allow all the cpus | 
|  | 2775 | * to do the newly idle load balance. | 
|  | 2776 | */ | 
| Peter Zijlstra | bbc8cb5 | 2010-07-09 15:15:43 +0200 | [diff] [blame] | 2777 | if (idle != CPU_NEWLY_IDLE && local_group) { | 
|  | 2778 | if (balance_cpu != this_cpu) { | 
|  | 2779 | *balance = 0; | 
|  | 2780 | return; | 
|  | 2781 | } | 
|  | 2782 | update_group_power(sd, this_cpu); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2783 | } | 
|  | 2784 |  | 
|  | 2785 | /* Adjust by relative CPU power of the group */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2786 | sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2787 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2788 | /* | 
|  | 2789 | * Consider the group unbalanced when the imbalance is larger | 
| Peter Zijlstra | 866ab43 | 2011-02-21 18:56:47 +0100 | [diff] [blame] | 2790 | * than the average weight of a task. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2791 | * | 
|  | 2792 | * APZ: with cgroup the avg task weight can vary wildly and | 
|  | 2793 | *      might not be a suitable number - should we keep a | 
|  | 2794 | *      normalized nr_running number somewhere that negates | 
|  | 2795 | *      the hierarchy? | 
|  | 2796 | */ | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 2797 | if (sgs->sum_nr_running) | 
|  | 2798 | avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2799 |  | 
| Peter Zijlstra | 866ab43 | 2011-02-21 18:56:47 +0100 | [diff] [blame] | 2800 | if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2801 | sgs->group_imb = 1; | 
|  | 2802 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2803 | sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power, | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2804 | SCHED_POWER_SCALE); | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2805 | if (!sgs->group_capacity) | 
|  | 2806 | sgs->group_capacity = fix_small_capacity(sd, group); | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2807 | sgs->group_weight = group->group_weight; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 2808 |  | 
|  | 2809 | if (sgs->group_capacity > sgs->sum_nr_running) | 
|  | 2810 | sgs->group_has_capacity = 1; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2811 | } | 
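The two scalings above — group load normalized by relative cpu power, and capacity rounded to whole-cpu units — can be checked in isolation. A minimal userspace sketch with hypothetical load and power values (SCHED_POWER_SCALE is 1024 in this era of the kernel):

/*
 * Userspace sketch only; values are made up for illustration.
 */
#include <stdio.h>

#define SCHED_POWER_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned long group_load = 3072;	/* hypothetical summed load */
	unsigned long power = 2048;		/* e.g. two full-power cpus */

	/* group load normalized by relative cpu power */
	unsigned long avg_load = group_load * SCHED_POWER_SCALE / power;

	/* capacity rounded to whole-cpu units */
	unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);

	printf("avg_load=%lu capacity=%lu\n", avg_load, capacity); /* 1536, 2 */
	return 0;
}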
|  | 2812 |  | 
|  | 2813 | /** | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2814 | * update_sd_pick_busiest - return 1 on busiest group | 
|  | 2815 | * @sd: sched_domain whose statistics are to be checked | 
|  | 2816 | * @sds: sched_domain statistics | 
|  | 2817 | * @sg: sched_group candidate to be checked for being the busiest | 
| Michael Neuling | b6b1229 | 2010-06-10 12:06:21 +1000 | [diff] [blame] | 2818 | * @sgs: sched_group statistics | 
|  | 2819 | * @this_cpu: the current cpu | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2820 | * | 
|  | 2821 | * Determine if @sg is a busier group than the previously selected | 
|  | 2822 | * busiest group. | 
|  | 2823 | */ | 
|  | 2824 | static bool update_sd_pick_busiest(struct sched_domain *sd, | 
|  | 2825 | struct sd_lb_stats *sds, | 
|  | 2826 | struct sched_group *sg, | 
|  | 2827 | struct sg_lb_stats *sgs, | 
|  | 2828 | int this_cpu) | 
|  | 2829 | { | 
|  | 2830 | if (sgs->avg_load <= sds->max_load) | 
|  | 2831 | return false; | 
|  | 2832 |  | 
|  | 2833 | if (sgs->sum_nr_running > sgs->group_capacity) | 
|  | 2834 | return true; | 
|  | 2835 |  | 
|  | 2836 | if (sgs->group_imb) | 
|  | 2837 | return true; | 
|  | 2838 |  | 
|  | 2839 | /* | 
|  | 2840 | * ASYM_PACKING needs to move all the work to the lowest | 
|  | 2841 | * numbered CPUs in the group, therefore mark all groups | 
|  | 2842 | * higher than ourselves as busy. | 
|  | 2843 | */ | 
|  | 2844 | if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running && | 
|  | 2845 | this_cpu < group_first_cpu(sg)) { | 
|  | 2846 | if (!sds->busiest) | 
|  | 2847 | return true; | 
|  | 2848 |  | 
|  | 2849 | if (group_first_cpu(sds->busiest) > group_first_cpu(sg)) | 
|  | 2850 | return true; | 
|  | 2851 | } | 
|  | 2852 |  | 
|  | 2853 | return false; | 
|  | 2854 | } | 
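Leaving aside the ASYM_PACKING tie-break, the selection order above reduces to three checks: heavier than the current pick, over capacity, or internally imbalanced. A toy restatement with plain integers, for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* toy restatement of the pick order; ASYM_PACKING omitted for brevity */
static bool pick_busiest(unsigned long avg_load, unsigned long max_load,
			 unsigned long nr_running, unsigned long capacity,
			 bool group_imb)
{
	if (avg_load <= max_load)
		return false;	/* not heavier than the current pick */
	if (nr_running > capacity)
		return true;	/* running over capacity: clearly busiest */
	return group_imb;	/* internally imbalanced groups also qualify */
}

int main(void)
{
	printf("%d\n", pick_busiest(1536, 1024, 5, 4, false));	/* 1 */
	printf("%d\n", pick_busiest(1536, 1024, 3, 4, false));	/* 0 */
	return 0;
}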
|  | 2855 |  | 
|  | 2856 | /** | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2857 | * update_sd_lb_stats - Update sched_group's statistics for load balancing. | 
|  | 2858 | * @sd: sched_domain whose statistics are to be updated. | 
|  | 2859 | * @this_cpu: Cpu for which load balance is currently performed. | 
|  | 2860 | * @idle: Idle status of this_cpu | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2861 | * @cpus: Set of cpus considered for load balancing. | 
|  | 2862 | * @balance: Should we balance. | 
|  | 2863 | * @sds: variable to hold the statistics for this sched_domain. | 
|  | 2864 | */ | 
|  | 2865 | static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 2866 | enum cpu_idle_type idle, const struct cpumask *cpus, | 
|  | 2867 | int *balance, struct sd_lb_stats *sds) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2868 | { | 
|  | 2869 | struct sched_domain *child = sd->child; | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2870 | struct sched_group *sg = sd->groups; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2871 | struct sg_lb_stats sgs; | 
|  | 2872 | int load_idx, prefer_sibling = 0; | 
|  | 2873 |  | 
|  | 2874 | if (child && child->flags & SD_PREFER_SIBLING) | 
|  | 2875 | prefer_sibling = 1; | 
|  | 2876 |  | 
|  | 2877 | init_sd_power_savings_stats(sd, sds, idle); | 
|  | 2878 | load_idx = get_sd_load_idx(sd, idle); | 
|  | 2879 |  | 
|  | 2880 | do { | 
|  | 2881 | int local_group; | 
|  | 2882 |  | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2883 | local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg)); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2884 | memset(&sgs, 0, sizeof(sgs)); | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 2885 | update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2886 | local_group, cpus, balance, &sgs); | 
|  | 2887 |  | 
| Peter Zijlstra | 8f190fb | 2009-12-24 14:18:21 +0100 | [diff] [blame] | 2888 | if (local_group && !(*balance)) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2889 | return; | 
|  | 2890 |  | 
|  | 2891 | sds->total_load += sgs.group_load; | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2892 | sds->total_pwr += sg->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2893 |  | 
|  | 2894 | /* | 
|  | 2895 | * In case the child domain prefers tasks go to siblings | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2896 | * first, lower the sg capacity to one so that we'll try | 
| Nikhil Rao | 75dd321 | 2010-10-15 13:12:30 -0700 | [diff] [blame] | 2897 | * and move all the excess tasks away. We lower the capacity | 
|  | 2898 | * of a group only if the local group has the capacity to fit | 
|  | 2899 | * these excess tasks, i.e. nr_running < group_capacity. The | 
|  | 2900 | * extra check prevents the case where you always pull from the | 
|  | 2901 | * heaviest group when it is already under-utilized (possible | 
|  | 2902 | * when a large weight task outweighs the tasks on the system). | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2903 | */ | 
| Nikhil Rao | 75dd321 | 2010-10-15 13:12:30 -0700 | [diff] [blame] | 2904 | if (prefer_sibling && !local_group && sds->this_has_capacity) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2905 | sgs.group_capacity = min(sgs.group_capacity, 1UL); | 
|  | 2906 |  | 
|  | 2907 | if (local_group) { | 
|  | 2908 | sds->this_load = sgs.avg_load; | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2909 | sds->this = sg; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2910 | sds->this_nr_running = sgs.sum_nr_running; | 
|  | 2911 | sds->this_load_per_task = sgs.sum_weighted_load; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 2912 | sds->this_has_capacity = sgs.group_has_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2913 | sds->this_idle_cpus = sgs.idle_cpus; | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2914 | } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2915 | sds->max_load = sgs.avg_load; | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2916 | sds->busiest = sg; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2917 | sds->busiest_nr_running = sgs.sum_nr_running; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2918 | sds->busiest_idle_cpus = sgs.idle_cpus; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 2919 | sds->busiest_group_capacity = sgs.group_capacity; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2920 | sds->busiest_load_per_task = sgs.sum_weighted_load; | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 2921 | sds->busiest_has_capacity = sgs.group_has_capacity; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 2922 | sds->busiest_group_weight = sgs.group_weight; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2923 | sds->group_imb = sgs.group_imb; | 
|  | 2924 | } | 
|  | 2925 |  | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2926 | update_sd_power_savings_stats(sg, sds, local_group, &sgs); | 
|  | 2927 | sg = sg->next; | 
|  | 2928 | } while (sg != sd->groups); | 
|  | 2929 | } | 
|  | 2930 |  | 
| Michael Neuling | 2ec57d4 | 2010-06-29 12:02:01 +1000 | [diff] [blame] | 2931 | int __weak arch_sd_sibling_asym_packing(void) | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2932 | { | 
|  | 2933 | return 0*SD_ASYM_PACKING; | 
|  | 2934 | } | 
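An architecture that wants SMT packing overrides this __weak stub with a non-zero return; the sketch below shows the shape of such an override (powerpc gates this on an asymmetric-SMT CPU feature for POWER7), not a copy of any arch's actual code:

/* illustrative arch override, assumes kernel context for SD_ASYM_PACKING;
 * a real implementation would gate on a CPU feature check */
int arch_sd_sibling_asym_packing(void)
{
	return SD_ASYM_PACKING;
}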
|  | 2935 |  | 
|  | 2936 | /** | 
|  | 2937 | * check_asym_packing - Check to see if the group is packed into the | 
|  | 2938 | *			sched domain. | 
|  | 2939 | * | 
|  | 2940 | * This is primarily intended to be used at the sibling level.  Some | 
|  | 2941 | * cores like POWER7 prefer to use lower numbered SMT threads.  In the | 
|  | 2942 | * case of POWER7, it can move to lower SMT modes only when higher | 
|  | 2943 | * threads are idle.  When in lower SMT modes, the threads will | 
|  | 2944 | * perform better since they share less core resources.  Hence when we | 
|  | 2945 | * have idle threads, we want them to be the higher ones. | 
|  | 2946 | * | 
|  | 2947 | * This packing function is run on idle threads.  It checks to see if | 
|  | 2948 | * the busiest CPU in this domain (core in the P7 case) has a higher | 
|  | 2949 | * CPU number than the packing function is being run on.  Here we are | 
|  | 2950 | * assuming a lower CPU number is equivalent to a lower SMT thread | 
|  | 2951 | * number. | 
|  | 2952 | * | 
| Michael Neuling | b6b1229 | 2010-06-10 12:06:21 +1000 | [diff] [blame] | 2953 | * Returns 1 when packing is required and a task should be moved to | 
|  | 2954 | * this CPU.  The amount of the imbalance is returned in *imbalance. | 
|  | 2955 | * | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2956 | * @sd: The sched_domain whose packing is to be checked. | 
|  | 2957 | * @sds: Statistics of the sched_domain which is to be packed | 
|  | 2958 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | 
|  | 2959 | * @imbalance: returns amount of imbalance due to packing. | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2960 | */ | 
|  | 2961 | static int check_asym_packing(struct sched_domain *sd, | 
|  | 2962 | struct sd_lb_stats *sds, | 
|  | 2963 | int this_cpu, unsigned long *imbalance) | 
|  | 2964 | { | 
|  | 2965 | int busiest_cpu; | 
|  | 2966 |  | 
|  | 2967 | if (!(sd->flags & SD_ASYM_PACKING)) | 
|  | 2968 | return 0; | 
|  | 2969 |  | 
|  | 2970 | if (!sds->busiest) | 
|  | 2971 | return 0; | 
|  | 2972 |  | 
|  | 2973 | busiest_cpu = group_first_cpu(sds->busiest); | 
|  | 2974 | if (this_cpu > busiest_cpu) | 
|  | 2975 | return 0; | 
|  | 2976 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2977 | *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power, | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 2978 | SCHED_POWER_SCALE); | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 2979 | return 1; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2980 | } | 
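With hypothetical numbers, the imbalance computed above is just the busiest group's load converted back into task-load units. A userspace sketch:

/* userspace sketch with made-up values */
#include <stdio.h>

#define SCHED_POWER_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	int this_cpu = 0, busiest_cpu = 2;	/* lower-numbered cpu is idle */
	unsigned long max_load = 512;		/* hypothetical group load */
	unsigned long busiest_power = 1024;

	if (this_cpu > busiest_cpu)
		return 0;			/* no packing needed */

	unsigned long imbalance = DIV_ROUND_CLOSEST(max_load * busiest_power,
						    SCHED_POWER_SCALE);
	printf("pull %lu load to cpu %d\n", imbalance, this_cpu);	/* 512 */
	return 0;
}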
|  | 2981 |  | 
|  | 2982 | /** | 
|  | 2983 | * fix_small_imbalance - Calculate the minor imbalance that exists | 
|  | 2984 | *			amongst the groups of a sched_domain, during | 
|  | 2985 | *			load balancing. | 
|  | 2986 | * @sds: Statistics of the sched_domain whose imbalance is to be calculated. | 
|  | 2987 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | 
|  | 2988 | * @imbalance: Variable to store the imbalance. | 
|  | 2989 | */ | 
|  | 2990 | static inline void fix_small_imbalance(struct sd_lb_stats *sds, | 
|  | 2991 | int this_cpu, unsigned long *imbalance) | 
|  | 2992 | { | 
|  | 2993 | unsigned long tmp, pwr_now = 0, pwr_move = 0; | 
|  | 2994 | unsigned int imbn = 2; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 2995 | unsigned long scaled_busy_load_per_task; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2996 |  | 
|  | 2997 | if (sds->this_nr_running) { | 
|  | 2998 | sds->this_load_per_task /= sds->this_nr_running; | 
|  | 2999 | if (sds->busiest_load_per_task > | 
|  | 3000 | sds->this_load_per_task) | 
|  | 3001 | imbn = 1; | 
|  | 3002 | } else | 
|  | 3003 | sds->this_load_per_task = | 
|  | 3004 | cpu_avg_load_per_task(this_cpu); | 
|  | 3005 |  | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3006 | scaled_busy_load_per_task = sds->busiest_load_per_task | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3007 | * SCHED_POWER_SCALE; | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3008 | scaled_busy_load_per_task /= sds->busiest->sgp->power; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3009 |  | 
|  | 3010 | if (sds->max_load - sds->this_load + scaled_busy_load_per_task >= | 
|  | 3011 | (scaled_busy_load_per_task * imbn)) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3012 | *imbalance = sds->busiest_load_per_task; | 
|  | 3013 | return; | 
|  | 3014 | } | 
|  | 3015 |  | 
|  | 3016 | /* | 
|  | 3017 | * OK, we don't have enough imbalance to justify moving tasks; | 
|  | 3018 | * however we may be able to increase total CPU power used by | 
|  | 3019 | * moving them. | 
|  | 3020 | */ | 
|  | 3021 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3022 | pwr_now += sds->busiest->sgp->power * | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3023 | min(sds->busiest_load_per_task, sds->max_load); | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3024 | pwr_now += sds->this->sgp->power * | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3025 | min(sds->this_load_per_task, sds->this_load); | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3026 | pwr_now /= SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3027 |  | 
|  | 3028 | /* Amount of load we'd subtract */ | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3029 | tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3030 | sds->busiest->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3031 | if (sds->max_load > tmp) | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3032 | pwr_move += sds->busiest->sgp->power * | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3033 | min(sds->busiest_load_per_task, sds->max_load - tmp); | 
|  | 3034 |  | 
|  | 3035 | /* Amount of load we'd add */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3036 | if (sds->max_load * sds->busiest->sgp->power < | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3037 | sds->busiest_load_per_task * SCHED_POWER_SCALE) | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3038 | tmp = (sds->max_load * sds->busiest->sgp->power) / | 
|  | 3039 | sds->this->sgp->power; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3040 | else | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3041 | tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3042 | sds->this->sgp->power; | 
|  | 3043 | pwr_move += sds->this->sgp->power * | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3044 | min(sds->this_load_per_task, sds->this_load + tmp); | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3045 | pwr_move /= SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3046 |  | 
|  | 3047 | /* Move if we gain throughput */ | 
|  | 3048 | if (pwr_move > pwr_now) | 
|  | 3049 | *imbalance = sds->busiest_load_per_task; | 
|  | 3050 | } | 
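The pwr_now/pwr_move comparison above can be exercised with made-up numbers. A userspace sketch, keeping only the common else-branch of the "amount of load we'd add" step (with both groups at full power the branch choice doesn't matter here):

#include <stdio.h>

#define SCHED_POWER_SCALE	1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* hypothetical stats: one loaded cpu, one idle, equal power */
	unsigned long busiest_power = 1024, this_power = 1024;
	unsigned long busiest_lpt = 512, this_lpt = 512;
	unsigned long max_load = 1024, this_load = 0;
	unsigned long pwr_now = 0, pwr_move = 0, tmp;

	pwr_now += busiest_power * min_ul(busiest_lpt, max_load);
	pwr_now += this_power * min_ul(this_lpt, this_load);
	pwr_now /= SCHED_POWER_SCALE;

	/* load we'd subtract from the busiest cpu */
	tmp = busiest_lpt * SCHED_POWER_SCALE / busiest_power;
	if (max_load > tmp)
		pwr_move += busiest_power *
			min_ul(busiest_lpt, max_load - tmp);

	/* load we'd add to this cpu (common branch only, powers equal) */
	tmp = busiest_lpt * SCHED_POWER_SCALE / this_power;
	pwr_move += this_power * min_ul(this_lpt, this_load + tmp);
	pwr_move /= SCHED_POWER_SCALE;

	/* 512 vs 1024: moving one task doubles the usefully used power */
	printf("pwr_now=%lu pwr_move=%lu move=%d\n",
	       pwr_now, pwr_move, pwr_move > pwr_now);
	return 0;
}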
|  | 3051 |  | 
|  | 3052 | /** | 
|  | 3053 | * calculate_imbalance - Calculate the amount of imbalance present within the | 
|  | 3054 | *			 groups of a given sched_domain during load balance. | 
|  | 3055 | * @sds: statistics of the sched_domain whose imbalance is to be calculated. | 
|  | 3056 | * @this_cpu: Cpu for which currently load balance is being performed. | 
|  | 3057 | * @imbalance: The variable to store the imbalance. | 
|  | 3058 | */ | 
|  | 3059 | static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, | 
|  | 3060 | unsigned long *imbalance) | 
|  | 3061 | { | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3062 | unsigned long max_pull, load_above_capacity = ~0UL; | 
|  | 3063 |  | 
|  | 3064 | sds->busiest_load_per_task /= sds->busiest_nr_running; | 
|  | 3065 | if (sds->group_imb) { | 
|  | 3066 | sds->busiest_load_per_task = | 
|  | 3067 | min(sds->busiest_load_per_task, sds->avg_load); | 
|  | 3068 | } | 
|  | 3069 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3070 | /* | 
|  | 3071 | * In the presence of smp nice balancing, certain scenarios can have | 
|  | 3072 | * max load less than avg load (as we skip the groups at or below | 
|  | 3073 | * its cpu_power while calculating max_load). | 
|  | 3074 | */ | 
|  | 3075 | if (sds->max_load < sds->avg_load) { | 
|  | 3076 | *imbalance = 0; | 
|  | 3077 | return fix_small_imbalance(sds, this_cpu, imbalance); | 
|  | 3078 | } | 
|  | 3079 |  | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3080 | if (!sds->group_imb) { | 
|  | 3081 | /* | 
|  | 3082 | * Don't want to pull so many tasks that a group would go idle. | 
|  | 3083 | */ | 
|  | 3084 | load_above_capacity = (sds->busiest_nr_running - | 
|  | 3085 | sds->busiest_group_capacity); | 
|  | 3086 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3087 | load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE); | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3088 |  | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3089 | load_above_capacity /= sds->busiest->sgp->power; | 
| Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3090 | } | 
|  | 3091 |  | 
|  | 3092 | /* | 
|  | 3093 | * We're trying to get all the cpus to the average_load, so we don't | 
|  | 3094 | * want to push ourselves above the average load, nor do we wish to | 
|  | 3095 | * reduce the max loaded cpu below the average load. At the same time, | 
|  | 3096 | * we also don't want to reduce the group load below the group capacity | 
|  | 3097 | * (so that we can implement power-savings policies etc). Thus we look | 
|  | 3098 | * for the minimum possible imbalance. | 
|  | 3099 | * Be careful of negative numbers as they'll appear as very large values | 
|  | 3100 | * with unsigned longs. | 
|  | 3101 | */ | 
|  | 3102 | max_pull = min(sds->max_load - sds->avg_load, load_above_capacity); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3103 |  | 
|  | 3104 | /* How much load to actually move to equalise the imbalance */ | 
| Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3105 | *imbalance = min(max_pull * sds->busiest->sgp->power, | 
|  | 3106 | (sds->avg_load - sds->this_load) * sds->this->sgp->power) | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3107 | / SCHED_POWER_SCALE; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3108 |  | 
|  | 3109 | /* | 
|  | 3110 | * If *imbalance is less than the average load per runnable task, | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 3111 | * there is no guarantee that any tasks will be moved, so we | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3112 | * consider bumping its value to force at least one task to be | 
|  | 3113 | * moved. | 
|  | 3114 | */ | 
|  | 3115 | if (*imbalance < sds->busiest_load_per_task) | 
|  | 3116 | return fix_small_imbalance(sds, this_cpu, imbalance); | 
|  | 3117 |  | 
|  | 3118 | } | 
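A worked example of the max_pull clamp above, using hypothetical statistics and the group_imb case where load_above_capacity stays at ~0UL:

/* userspace sketch with made-up domain statistics */
#include <stdio.h>

#define SCHED_POWER_SCALE	1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long max_load = 1536, avg_load = 1024, this_load = 512;
	unsigned long busiest_power = 1024, this_power = 1024;
	unsigned long load_above_capacity = ~0UL;	/* group_imb case */

	/* don't pull below the average, don't push above it */
	unsigned long max_pull = min_ul(max_load - avg_load,
					load_above_capacity);
	unsigned long imbalance = min_ul(max_pull * busiest_power,
					 (avg_load - this_load) * this_power)
					/ SCHED_POWER_SCALE;

	printf("max_pull=%lu imbalance=%lu\n", max_pull, imbalance); /* 512 512 */
	return 0;
}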
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3119 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3120 | /******* find_busiest_group() helpers end here *********************/ | 
|  | 3121 |  | 
|  | 3122 | /** | 
|  | 3123 | * find_busiest_group - Returns the busiest group within the sched_domain | 
|  | 3124 | * if there is an imbalance. If there isn't an imbalance, and | 
|  | 3125 | * the user has opted for power-savings, it returns a group whose | 
|  | 3126 | * CPUs can be put to idle by rebalancing those tasks elsewhere, if | 
|  | 3127 | * such a group exists. | 
|  | 3128 | * | 
|  | 3129 | * Also calculates the amount of weighted load which should be moved | 
|  | 3130 | * to restore balance. | 
|  | 3131 | * | 
|  | 3132 | * @sd: The sched_domain whose busiest group is to be returned. | 
|  | 3133 | * @this_cpu: The cpu for which load balancing is currently being performed. | 
|  | 3134 | * @imbalance: Variable which stores amount of weighted load which should | 
|  | 3135 | *		be moved to restore balance/put a group to idle. | 
|  | 3136 | * @idle: The idle status of this_cpu. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3137 | * @cpus: The set of CPUs under consideration for load-balancing. | 
|  | 3138 | * @balance: Pointer to a variable indicating if this_cpu | 
|  | 3139 | *	is the appropriate cpu to perform load balancing at this_level. | 
|  | 3140 | * | 
|  | 3141 | * Returns:	- the busiest group if imbalance exists. | 
|  | 3142 | *		- If no imbalance and user has opted for power-savings balance, | 
|  | 3143 | *		   return the least loaded group whose CPUs can be | 
|  | 3144 | *		   put to idle by rebalancing its tasks onto our group. | 
|  | 3145 | */ | 
|  | 3146 | static struct sched_group * | 
|  | 3147 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 
|  | 3148 | unsigned long *imbalance, enum cpu_idle_type idle, | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3149 | const struct cpumask *cpus, int *balance) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3150 | { | 
|  | 3151 | struct sd_lb_stats sds; | 
|  | 3152 |  | 
|  | 3153 | memset(&sds, 0, sizeof(sds)); | 
|  | 3154 |  | 
|  | 3155 | /* | 
|  | 3156 | * Compute the various statistics relevant for load balancing at | 
|  | 3157 | * this level. | 
|  | 3158 | */ | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3159 | update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3160 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 3161 | /* | 
|  | 3162 | * this_cpu is not the appropriate cpu to perform load balancing at | 
|  | 3163 | * this level. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3164 | */ | 
| Peter Zijlstra | 8f190fb | 2009-12-24 14:18:21 +0100 | [diff] [blame] | 3165 | if (!(*balance)) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3166 | goto ret; | 
|  | 3167 |  | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3168 | if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) && | 
|  | 3169 | check_asym_packing(sd, &sds, this_cpu, imbalance)) | 
|  | 3170 | return sds.busiest; | 
|  | 3171 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 3172 | /* There is no busy sibling group to pull tasks from */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3173 | if (!sds.busiest || sds.busiest_nr_running == 0) | 
|  | 3174 | goto out_balanced; | 
|  | 3175 |  | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3176 | sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr; | 
| Ken Chen | b0432d8 | 2011-04-07 17:23:22 -0700 | [diff] [blame] | 3177 |  | 
| Peter Zijlstra | 866ab43 | 2011-02-21 18:56:47 +0100 | [diff] [blame] | 3178 | /* | 
|  | 3179 | * If the busiest group is imbalanced, the below checks don't | 
|  | 3180 | * work because they assume all things are equal, which typically | 
|  | 3181 | * isn't true due to cpus_allowed constraints and the like. | 
|  | 3182 | */ | 
|  | 3183 | if (sds.group_imb) | 
|  | 3184 | goto force_balance; | 
|  | 3185 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 3186 | /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3187 | if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity && | 
|  | 3188 | !sds.busiest_has_capacity) | 
|  | 3189 | goto force_balance; | 
|  | 3190 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 3191 | /* | 
|  | 3192 | * If the local group is more busy than the selected busiest group | 
|  | 3193 | * don't try and pull any tasks. | 
|  | 3194 | */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3195 | if (sds.this_load >= sds.max_load) | 
|  | 3196 | goto out_balanced; | 
|  | 3197 |  | 
| Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 3198 | /* | 
|  | 3199 | * Don't pull any tasks if this group is already above the domain | 
|  | 3200 | * average load. | 
|  | 3201 | */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3202 | if (sds.this_load >= sds.avg_load) | 
|  | 3203 | goto out_balanced; | 
|  | 3204 |  | 
| Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 3205 | if (idle == CPU_IDLE) { | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3206 | /* | 
|  | 3207 | * This cpu is idle. If the busiest group doesn't have more | 
|  | 3208 | * tasks than the number of available cpus, and there is no | 
|  | 3209 | * imbalance between this and the busiest group wrt idle | 
|  | 3210 | * cpus, it is balanced. | 
|  | 3211 | */ | 
| Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 3212 | if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) && | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3213 | sds.busiest_nr_running <= sds.busiest_group_weight) | 
|  | 3214 | goto out_balanced; | 
| Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 3215 | } else { | 
|  | 3216 | /* | 
|  | 3217 | * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use | 
|  | 3218 | * imbalance_pct to be conservative. | 
|  | 3219 | */ | 
|  | 3220 | if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) | 
|  | 3221 | goto out_balanced; | 
| Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3222 | } | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3223 |  | 
| Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3224 | force_balance: | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3225 | /* Looks like there is an imbalance. Compute it */ | 
|  | 3226 | calculate_imbalance(&sds, this_cpu, imbalance); | 
|  | 3227 | return sds.busiest; | 
|  | 3228 |  | 
|  | 3229 | out_balanced: | 
|  | 3230 | /* | 
|  | 3231 | * There is no obvious imbalance. But check if we can do some balancing | 
|  | 3232 | * to save power. | 
|  | 3233 | */ | 
|  | 3234 | if (check_power_save_busiest_group(&sds, this_cpu, imbalance)) | 
|  | 3235 | return sds.busiest; | 
|  | 3236 | ret: | 
|  | 3237 | *imbalance = 0; | 
|  | 3238 | return NULL; | 
|  | 3239 | } | 
|  | 3240 |  | 
|  | 3241 | /* | 
|  | 3242 | * find_busiest_queue - find the busiest runqueue among the cpus in group. | 
|  | 3243 | */ | 
|  | 3244 | static struct rq * | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3245 | find_busiest_queue(struct sched_domain *sd, struct sched_group *group, | 
|  | 3246 | enum cpu_idle_type idle, unsigned long imbalance, | 
|  | 3247 | const struct cpumask *cpus) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3248 | { | 
|  | 3249 | struct rq *busiest = NULL, *rq; | 
|  | 3250 | unsigned long max_load = 0; | 
|  | 3251 | int i; | 
|  | 3252 |  | 
|  | 3253 | for_each_cpu(i, sched_group_cpus(group)) { | 
|  | 3254 | unsigned long power = power_of(i); | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3255 | unsigned long capacity = DIV_ROUND_CLOSEST(power, | 
|  | 3256 | SCHED_POWER_SCALE); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3257 | unsigned long wl; | 
|  | 3258 |  | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3259 | if (!capacity) | 
|  | 3260 | capacity = fix_small_capacity(sd, group); | 
|  | 3261 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3262 | if (!cpumask_test_cpu(i, cpus)) | 
|  | 3263 | continue; | 
|  | 3264 |  | 
|  | 3265 | rq = cpu_rq(i); | 
| Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 3266 | wl = weighted_cpuload(i); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3267 |  | 
| Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 3268 | /* | 
|  | 3269 | * When comparing with imbalance, use weighted_cpuload() | 
|  | 3270 | * which is not scaled with the cpu power. | 
|  | 3271 | */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3272 | if (capacity && rq->nr_running == 1 && wl > imbalance) | 
|  | 3273 | continue; | 
|  | 3274 |  | 
| Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 3275 | /* | 
|  | 3276 | * For the load comparisons with the other cpu's, consider | 
|  | 3277 | * the weighted_cpuload() scaled with the cpu power, so that | 
|  | 3278 | * the load can be moved away from the cpu that is potentially | 
|  | 3279 | * running at a lower capacity. | 
|  | 3280 | */ | 
| Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3281 | wl = (wl * SCHED_POWER_SCALE) / power; | 
| Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 3282 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3283 | if (wl > max_load) { | 
|  | 3284 | max_load = wl; | 
|  | 3285 | busiest = rq; | 
|  | 3286 | } | 
|  | 3287 | } | 
|  | 3288 |  | 
|  | 3289 | return busiest; | 
|  | 3290 | } | 
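The effect of the power scaling above: the same raw weighted load looks heavier on a cpu with less compute power, so that cpu is preferred as the pull source. A small sketch with hypothetical powers:

#include <stdio.h>

#define SCHED_POWER_SCALE	1024UL

int main(void)
{
	unsigned long wl = 2048;		/* same raw weighted load */
	unsigned long full = 1024, half = 512;	/* hypothetical cpu powers */

	printf("full-power cpu: %lu\n", wl * SCHED_POWER_SCALE / full);	/* 2048 */
	printf("half-power cpu: %lu\n", wl * SCHED_POWER_SCALE / half);	/* 4096 */
	/* the half-power cpu looks twice as loaded and wins the pick */
	return 0;
}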
|  | 3291 |  | 
|  | 3292 | /* | 
|  | 3293 | * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but | 
|  | 3294 | * so long as it is large enough. | 
|  | 3295 | */ | 
|  | 3296 | #define MAX_PINNED_INTERVAL	512 | 
|  | 3297 |  | 
|  | 3298 | /* Working cpumask for load_balance and load_balance_newidle. */ | 
|  | 3299 | static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); | 
|  | 3300 |  | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3301 | static int need_active_balance(struct sched_domain *sd, int idle, | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3302 | int busiest_cpu, int this_cpu) | 
| Peter Zijlstra | 1af3ed3 | 2009-12-23 15:10:31 +0100 | [diff] [blame] | 3303 | { | 
|  | 3304 | if (idle == CPU_NEWLY_IDLE) { | 
| Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3305 |  | 
|  | 3306 | /* | 
|  | 3307 | * ASYM_PACKING needs to force migrate tasks from busy but | 
|  | 3308 | * higher numbered CPUs in order to pack all tasks in the | 
|  | 3309 | * lowest numbered CPUs. | 
|  | 3310 | */ | 
|  | 3311 | if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu) | 
|  | 3312 | return 1; | 
|  | 3313 |  | 
| Peter Zijlstra | 1af3ed3 | 2009-12-23 15:10:31 +0100 | [diff] [blame] | 3314 | /* | 
|  | 3315 | * The only task running on a non-idle cpu can be moved to this | 
|  | 3316 | * cpu in an attempt to completely free up the other CPU | 
|  | 3317 | * package. | 
|  | 3318 | * | 
|  | 3319 | * The package power saving logic comes from | 
|  | 3320 | * find_busiest_group(). If there are no imbalance, then | 
|  | 3321 | * f_b_g() will return NULL. However when sched_mc={1,2} then | 
|  | 3322 | * f_b_g() will select a group from which a running task may be | 
|  | 3323 | * pulled to this cpu in order to make the other package idle. | 
|  | 3324 | * If there is no opportunity to make a package idle and if | 
|  | 3325 | * there is no imbalance, then f_b_g() will return NULL and no | 
|  | 3326 | * action will be taken in load_balance_newidle(). | 
|  | 3327 | * | 
|  | 3328 | * Under normal task pull operation due to imbalance, there | 
|  | 3329 | * will be more than one task in the source run queue and | 
|  | 3330 | * move_tasks() will succeed.  ld_moved will be true and this | 
|  | 3331 | * active balance code will not be triggered. | 
|  | 3332 | */ | 
| Peter Zijlstra | 1af3ed3 | 2009-12-23 15:10:31 +0100 | [diff] [blame] | 3333 | if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) | 
|  | 3334 | return 0; | 
|  | 3335 | } | 
|  | 3336 |  | 
|  | 3337 | return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); | 
|  | 3338 | } | 
|  | 3339 |  | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3340 | static int active_load_balance_cpu_stop(void *data); | 
|  | 3341 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3342 | /* | 
|  | 3343 | * Check this_cpu to ensure it is balanced within domain. Attempt to move | 
|  | 3344 | * tasks if there is an imbalance. | 
|  | 3345 | */ | 
|  | 3346 | static int load_balance(int this_cpu, struct rq *this_rq, | 
|  | 3347 | struct sched_domain *sd, enum cpu_idle_type idle, | 
|  | 3348 | int *balance) | 
|  | 3349 | { | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3350 | int ld_moved, all_pinned = 0, active_balance = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3351 | struct sched_group *group; | 
|  | 3352 | unsigned long imbalance; | 
|  | 3353 | struct rq *busiest; | 
|  | 3354 | unsigned long flags; | 
|  | 3355 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | 
|  | 3356 |  | 
|  | 3357 | cpumask_copy(cpus, cpu_active_mask); | 
|  | 3358 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3359 | schedstat_inc(sd, lb_count[idle]); | 
|  | 3360 |  | 
|  | 3361 | redo: | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3362 | group = find_busiest_group(sd, this_cpu, &imbalance, idle, | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3363 | cpus, balance); | 
|  | 3364 |  | 
|  | 3365 | if (*balance == 0) | 
|  | 3366 | goto out_balanced; | 
|  | 3367 |  | 
|  | 3368 | if (!group) { | 
|  | 3369 | schedstat_inc(sd, lb_nobusyg[idle]); | 
|  | 3370 | goto out_balanced; | 
|  | 3371 | } | 
|  | 3372 |  | 
| Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3373 | busiest = find_busiest_queue(sd, group, idle, imbalance, cpus); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3374 | if (!busiest) { | 
|  | 3375 | schedstat_inc(sd, lb_nobusyq[idle]); | 
|  | 3376 | goto out_balanced; | 
|  | 3377 | } | 
|  | 3378 |  | 
|  | 3379 | BUG_ON(busiest == this_rq); | 
|  | 3380 |  | 
|  | 3381 | schedstat_add(sd, lb_imbalance[idle], imbalance); | 
|  | 3382 |  | 
|  | 3383 | ld_moved = 0; | 
|  | 3384 | if (busiest->nr_running > 1) { | 
|  | 3385 | /* | 
|  | 3386 | * Attempt to move tasks. If find_busiest_group has found | 
|  | 3387 | * an imbalance but busiest->nr_running <= 1, the group is | 
|  | 3388 | * still unbalanced. ld_moved simply stays zero, so it is | 
|  | 3389 | * correctly treated as an imbalance. | 
|  | 3390 | */ | 
| Ken Chen | b30aef1 | 2011-04-08 12:20:16 -0700 | [diff] [blame] | 3391 | all_pinned = 1; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3392 | local_irq_save(flags); | 
|  | 3393 | double_rq_lock(this_rq, busiest); | 
|  | 3394 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | 
|  | 3395 | imbalance, sd, idle, &all_pinned); | 
|  | 3396 | double_rq_unlock(this_rq, busiest); | 
|  | 3397 | local_irq_restore(flags); | 
|  | 3398 |  | 
|  | 3399 | /* | 
|  | 3400 | * some other cpu did the load balance for us. | 
|  | 3401 | */ | 
|  | 3402 | if (ld_moved && this_cpu != smp_processor_id()) | 
|  | 3403 | resched_cpu(this_cpu); | 
|  | 3404 |  | 
|  | 3405 | /* All tasks on this runqueue were pinned by CPU affinity */ | 
|  | 3406 | if (unlikely(all_pinned)) { | 
|  | 3407 | cpumask_clear_cpu(cpu_of(busiest), cpus); | 
|  | 3408 | if (!cpumask_empty(cpus)) | 
|  | 3409 | goto redo; | 
|  | 3410 | goto out_balanced; | 
|  | 3411 | } | 
|  | 3412 | } | 
|  | 3413 |  | 
|  | 3414 | if (!ld_moved) { | 
|  | 3415 | schedstat_inc(sd, lb_failed[idle]); | 
| Venkatesh Pallipadi | 58b26c4 | 2010-09-10 18:19:17 -0700 | [diff] [blame] | 3416 | /* | 
|  | 3417 | * Increment the failure counter only on periodic balance. | 
|  | 3418 | * We do not want newidle balance, which can be very | 
|  | 3419 | * frequent, pollute the failure counter causing | 
|  | 3420 | * excessive cache_hot migrations and active balances. | 
|  | 3421 | */ | 
|  | 3422 | if (idle != CPU_NEWLY_IDLE) | 
|  | 3423 | sd->nr_balance_failed++; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3424 |  | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3425 | if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3426 | raw_spin_lock_irqsave(&busiest->lock, flags); | 
|  | 3427 |  | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3428 | /* don't kick the active_load_balance_cpu_stop, | 
|  | 3429 | * if the curr task on busiest cpu can't be | 
|  | 3430 | * moved to this_cpu | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3431 | */ | 
|  | 3432 | if (!cpumask_test_cpu(this_cpu, | 
|  | 3433 | &busiest->curr->cpus_allowed)) { | 
|  | 3434 | raw_spin_unlock_irqrestore(&busiest->lock, | 
|  | 3435 | flags); | 
|  | 3436 | all_pinned = 1; | 
|  | 3437 | goto out_one_pinned; | 
|  | 3438 | } | 
|  | 3439 |  | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3440 | /* | 
|  | 3441 | * ->active_balance synchronizes accesses to | 
|  | 3442 | * ->active_balance_work.  Once set, it's cleared | 
|  | 3443 | * only after active load balance is finished. | 
|  | 3444 | */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3445 | if (!busiest->active_balance) { | 
|  | 3446 | busiest->active_balance = 1; | 
|  | 3447 | busiest->push_cpu = this_cpu; | 
|  | 3448 | active_balance = 1; | 
|  | 3449 | } | 
|  | 3450 | raw_spin_unlock_irqrestore(&busiest->lock, flags); | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3451 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3452 | if (active_balance) | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3453 | stop_one_cpu_nowait(cpu_of(busiest), | 
|  | 3454 | active_load_balance_cpu_stop, busiest, | 
|  | 3455 | &busiest->active_balance_work); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3456 |  | 
|  | 3457 | /* | 
|  | 3458 | * We've kicked active balancing, reset the failure | 
|  | 3459 | * counter. | 
|  | 3460 | */ | 
|  | 3461 | sd->nr_balance_failed = sd->cache_nice_tries+1; | 
|  | 3462 | } | 
|  | 3463 | } else | 
|  | 3464 | sd->nr_balance_failed = 0; | 
|  | 3465 |  | 
|  | 3466 | if (likely(!active_balance)) { | 
|  | 3467 | /* We were unbalanced, so reset the balancing interval */ | 
|  | 3468 | sd->balance_interval = sd->min_interval; | 
|  | 3469 | } else { | 
|  | 3470 | /* | 
|  | 3471 | * If we've begun active balancing, start to back off. This | 
|  | 3472 | * case may not be covered by the all_pinned logic if there | 
|  | 3473 | * is only 1 task on the busy runqueue (because we don't call | 
|  | 3474 | * move_tasks). | 
|  | 3475 | */ | 
|  | 3476 | if (sd->balance_interval < sd->max_interval) | 
|  | 3477 | sd->balance_interval *= 2; | 
|  | 3478 | } | 
|  | 3479 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3480 | goto out; | 
|  | 3481 |  | 
|  | 3482 | out_balanced: | 
|  | 3483 | schedstat_inc(sd, lb_balanced[idle]); | 
|  | 3484 |  | 
|  | 3485 | sd->nr_balance_failed = 0; | 
|  | 3486 |  | 
|  | 3487 | out_one_pinned: | 
|  | 3488 | /* tune up the balancing interval */ | 
|  | 3489 | if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || | 
|  | 3490 | (sd->balance_interval < sd->max_interval)) | 
|  | 3491 | sd->balance_interval *= 2; | 
|  | 3492 |  | 
| Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 3493 | ld_moved = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3494 | out: | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3495 | return ld_moved; | 
|  | 3496 | } | 
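The balance_interval handling above is a capped exponential backoff: doubled whenever a pass moves nothing (or everything is pinned), reset to min_interval on success. A minimal sketch of the doubling with hypothetical interval values:

#include <stdio.h>

int main(void)
{
	unsigned long interval = 8;	/* hypothetical min_interval */
	unsigned long max_interval = 128;

	/* doubled after each fruitless pass, only while below the max */
	for (int pass = 0; pass < 6; pass++) {
		if (interval < max_interval)
			interval *= 2;
		printf("pass %d: interval=%lu\n", pass, interval);
	}
	return 0;
}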
|  | 3497 |  | 
|  | 3498 | /* | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3499 | * idle_balance is called by schedule() if this_cpu is about to become | 
|  | 3500 | * idle. Attempts to pull tasks from other CPUs. | 
|  | 3501 | */ | 
|  | 3502 | static void idle_balance(int this_cpu, struct rq *this_rq) | 
|  | 3503 | { | 
|  | 3504 | struct sched_domain *sd; | 
|  | 3505 | int pulled_task = 0; | 
|  | 3506 | unsigned long next_balance = jiffies + HZ; | 
|  | 3507 |  | 
|  | 3508 | this_rq->idle_stamp = this_rq->clock; | 
|  | 3509 |  | 
|  | 3510 | if (this_rq->avg_idle < sysctl_sched_migration_cost) | 
|  | 3511 | return; | 
|  | 3512 |  | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 3513 | /* | 
|  | 3514 | * Drop the rq->lock, but keep IRQ/preempt disabled. | 
|  | 3515 | */ | 
|  | 3516 | raw_spin_unlock(&this_rq->lock); | 
|  | 3517 |  | 
| Paul Turner | c66eaf6 | 2010-11-15 15:47:07 -0800 | [diff] [blame] | 3518 | update_shares(this_cpu); | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3519 | rcu_read_lock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3520 | for_each_domain(this_cpu, sd) { | 
|  | 3521 | unsigned long interval; | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 3522 | int balance = 1; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3523 |  | 
|  | 3524 | if (!(sd->flags & SD_LOAD_BALANCE)) | 
|  | 3525 | continue; | 
|  | 3526 |  | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 3527 | if (sd->flags & SD_BALANCE_NEWIDLE) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3528 | /* If we've pulled tasks over stop searching: */ | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 3529 | pulled_task = load_balance(this_cpu, this_rq, | 
|  | 3530 | sd, CPU_NEWLY_IDLE, &balance); | 
|  | 3531 | } | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3532 |  | 
|  | 3533 | interval = msecs_to_jiffies(sd->balance_interval); | 
|  | 3534 | if (time_after(next_balance, sd->last_balance + interval)) | 
|  | 3535 | next_balance = sd->last_balance + interval; | 
| Nikhil Rao | d5ad140 | 2010-11-17 11:42:04 -0800 | [diff] [blame] | 3536 | if (pulled_task) { | 
|  | 3537 | this_rq->idle_stamp = 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3538 | break; | 
| Nikhil Rao | d5ad140 | 2010-11-17 11:42:04 -0800 | [diff] [blame] | 3539 | } | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3540 | } | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3541 | rcu_read_unlock(); | 
| Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 3542 |  | 
|  | 3543 | raw_spin_lock(&this_rq->lock); | 
|  | 3544 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3545 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { | 
|  | 3546 | /* | 
|  | 3547 | * We are going idle. next_balance may be set based on | 
|  | 3548 | * a busy processor. So reset next_balance. | 
|  | 3549 | */ | 
|  | 3550 | this_rq->next_balance = next_balance; | 
|  | 3551 | } | 
|  | 3552 | } | 
|  | 3553 |  | 
|  | 3554 | /* | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3555 | * active_load_balance_cpu_stop is run by cpu stopper. It pushes | 
|  | 3556 | * running tasks off the busiest CPU onto idle CPUs. It requires at | 
|  | 3557 | * least 1 task to be running on each physical CPU where possible, and | 
|  | 3558 | * avoids physical / logical imbalances. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3559 | */ | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3560 | static int active_load_balance_cpu_stop(void *data) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3561 | { | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3562 | struct rq *busiest_rq = data; | 
|  | 3563 | int busiest_cpu = cpu_of(busiest_rq); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3564 | int target_cpu = busiest_rq->push_cpu; | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3565 | struct rq *target_rq = cpu_rq(target_cpu); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3566 | struct sched_domain *sd; | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3567 |  | 
|  | 3568 | raw_spin_lock_irq(&busiest_rq->lock); | 
|  | 3569 |  | 
|  | 3570 | /* make sure the requested cpu hasn't gone down in the meantime */ | 
|  | 3571 | if (unlikely(busiest_cpu != smp_processor_id() || | 
|  | 3572 | !busiest_rq->active_balance)) | 
|  | 3573 | goto out_unlock; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3574 |  | 
|  | 3575 | /* Is there any task to move? */ | 
|  | 3576 | if (busiest_rq->nr_running <= 1) | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3577 | goto out_unlock; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3578 |  | 
|  | 3579 | /* | 
|  | 3580 | * This condition is "impossible", if it occurs | 
|  | 3581 | * we need to fix it. Originally reported by | 
|  | 3582 | * Bjorn Helgaas on a 128-cpu setup. | 
|  | 3583 | */ | 
|  | 3584 | BUG_ON(busiest_rq == target_rq); | 
|  | 3585 |  | 
|  | 3586 | /* move a task from busiest_rq to target_rq */ | 
|  | 3587 | double_lock_balance(busiest_rq, target_rq); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3588 |  | 
|  | 3589 | /* Search for an sd spanning us and the target CPU. */ | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3590 | rcu_read_lock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3591 | for_each_domain(target_cpu, sd) { | 
|  | 3592 | if ((sd->flags & SD_LOAD_BALANCE) && | 
|  | 3593 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) | 
|  | 3594 | break; | 
|  | 3595 | } | 
|  | 3596 |  | 
|  | 3597 | if (likely(sd)) { | 
|  | 3598 | schedstat_inc(sd, alb_count); | 
|  | 3599 |  | 
|  | 3600 | if (move_one_task(target_rq, target_cpu, busiest_rq, | 
|  | 3601 | sd, CPU_IDLE)) | 
|  | 3602 | schedstat_inc(sd, alb_pushed); | 
|  | 3603 | else | 
|  | 3604 | schedstat_inc(sd, alb_failed); | 
|  | 3605 | } | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3606 | rcu_read_unlock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3607 | double_unlock_balance(busiest_rq, target_rq); | 
| Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3608 | out_unlock: | 
|  | 3609 | busiest_rq->active_balance = 0; | 
|  | 3610 | raw_spin_unlock_irq(&busiest_rq->lock); | 
|  | 3611 | return 0; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3612 | } | 
|  | 3613 |  | 
|  | 3614 | #ifdef CONFIG_NO_HZ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3615 |  | 
|  | 3616 | static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb); | 
|  | 3617 |  | 
|  | 3618 | static void trigger_sched_softirq(void *data) | 
|  | 3619 | { | 
|  | 3620 | raise_softirq_irqoff(SCHED_SOFTIRQ); | 
|  | 3621 | } | 
|  | 3622 |  | 
|  | 3623 | static inline void init_sched_softirq_csd(struct call_single_data *csd) | 
|  | 3624 | { | 
|  | 3625 | csd->func = trigger_sched_softirq; | 
|  | 3626 | csd->info = NULL; | 
|  | 3627 | csd->flags = 0; | 
|  | 3628 | csd->priv = 0; | 
|  | 3629 | } | 
|  | 3630 |  | 
|  | 3631 | /* | 
|  | 3632 | * idle load balancing details | 
|  | 3633 | * - One of the idle CPUs nominates itself as idle load_balancer, while | 
|  | 3634 | *   entering idle. | 
|  | 3635 | * - This idle load balancer CPU will also go into tickless mode when | 
|  | 3636 | *   it is idle, just like all other idle CPUs | 
|  | 3637 | * - When one of the busy CPUs notices that idle rebalancing may be | 
|  | 3638 | *   needed, it will kick the idle load balancer, which then does idle | 
|  | 3639 | *   load balancing for all the idle CPUs. | 
|  | 3640 | */ | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3641 | static struct { | 
|  | 3642 | atomic_t load_balancer; | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3643 | atomic_t first_pick_cpu; | 
|  | 3644 | atomic_t second_pick_cpu; | 
|  | 3645 | cpumask_var_t idle_cpus_mask; | 
|  | 3646 | cpumask_var_t grp_idle_mask; | 
|  | 3647 | unsigned long next_balance;     /* in jiffy units */ | 
|  | 3648 | } nohz ____cacheline_aligned; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3649 |  | 
|  | 3650 | int get_nohz_load_balancer(void) | 
|  | 3651 | { | 
|  | 3652 | return atomic_read(&nohz.load_balancer); | 
|  | 3653 | } | 
|  | 3654 |  | 
|  | 3655 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 
|  | 3656 | /** | 
|  | 3657 | * lowest_flag_domain - Return lowest sched_domain containing flag. | 
|  | 3658 | * @cpu:	The cpu whose lowest level of sched domain is to | 
|  | 3659 | *		be returned. | 
|  | 3660 | * @flag:	The flag to check for the lowest sched_domain | 
|  | 3661 | *		for the given cpu. | 
|  | 3662 | * | 
|  | 3663 | * Returns the lowest sched_domain of a cpu which contains the given flag. | 
|  | 3664 | */ | 
|  | 3665 | static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) | 
|  | 3666 | { | 
|  | 3667 | struct sched_domain *sd; | 
|  | 3668 |  | 
|  | 3669 | for_each_domain(cpu, sd) | 
|  | 3670 | if (sd && (sd->flags & flag)) | 
|  | 3671 | break; | 
|  | 3672 |  | 
|  | 3673 | return sd; | 
|  | 3674 | } | 
|  | 3675 |  | 
|  | 3676 | /** | 
|  | 3677 | * for_each_flag_domain - Iterates over sched_domains containing the flag. | 
|  | 3678 | * @cpu:	The cpu whose domains we're iterating over. | 
|  | 3679 | * @sd:		variable holding the value of the power_savings_sd | 
|  | 3680 | *		for cpu. | 
|  | 3681 | * @flag:	The flag to filter the sched_domains to be iterated. | 
|  | 3682 | * | 
|  | 3683 | * Iterates over all the scheduler domains for a given cpu that has the 'flag' | 
|  | 3684 | * set, starting from the lowest sched_domain to the highest. | 
|  | 3685 | */ | 
|  | 3686 | #define for_each_flag_domain(cpu, sd, flag) \ | 
|  | 3687 | for (sd = lowest_flag_domain(cpu, flag); \ | 
|  | 3688 | (sd && (sd->flags & flag)); sd = sd->parent) | 
|  | 3689 |  | 
|  | 3690 | /** | 
|  | 3691 | * is_semi_idle_group - Checks if the given sched_group is semi-idle. | 
|  | 3692 | * @ilb_group:	group to be checked for semi-idleness | 
|  | 3693 | * | 
|  | 3694 | * Returns:	1 if the group is semi-idle. 0 otherwise. | 
|  | 3695 | * | 
|  | 3696 | * We define a sched_group to be semi-idle if it has at least one idle CPU | 
|  | 3697 | * and at least one non-idle CPU. This helper function checks if the given | 
|  | 3698 | * sched_group is semi-idle or not. | 
|  | 3699 | */ | 
|  | 3700 | static inline int is_semi_idle_group(struct sched_group *ilb_group) | 
|  | 3701 | { | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3702 | cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask, | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3703 | sched_group_cpus(ilb_group)); | 
|  | 3704 |  | 
|  | 3705 | /* | 
|  | 3706 | * A sched_group is semi-idle when it has at least one busy cpu | 
|  | 3707 | * and at least one idle cpu. | 
|  | 3708 | */ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3709 | if (cpumask_empty(nohz.grp_idle_mask)) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3710 | return 0; | 
|  | 3711 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3712 | if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group))) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3713 | return 0; | 
|  | 3714 |  | 
|  | 3715 | return 1; | 
|  | 3716 | } | 
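The semi-idle test above is pure cpumask arithmetic: intersect the group with the idle mask and require the result to be neither empty nor the whole group. A userspace sketch with plain bitmasks standing in for cpumasks:

#include <stdio.h>

int main(void)
{
	unsigned long group = 0x0f;	/* cpus 0-3 in the group */
	unsigned long idle  = 0x05;	/* cpus 0 and 2 are idle */

	unsigned long grp_idle = group & idle;	/* cpumask_and */
	int semi_idle = grp_idle != 0 && grp_idle != group;

	printf("semi_idle=%d\n", semi_idle);	/* 1 */
	return 0;
}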
|  | 3717 | /** | 
|  | 3718 | * find_new_ilb - Finds the optimum idle load balancer for nomination. | 
|  | 3719 | * @cpu:	The cpu which is nominating a new idle_load_balancer. | 
|  | 3720 | * | 
|  | 3721 | * Returns:	Returns the id of the idle load balancer if it exists, | 
|  | 3722 | *		Else, returns >= nr_cpu_ids. | 
|  | 3723 | * | 
|  | 3724 | * This algorithm picks the idle load balancer such that it belongs to a | 
|  | 3725 | * semi-idle powersavings sched_domain. The idea is to try and avoid | 
|  | 3726 | * completely idle packages/cores just for the purpose of idle load balancing | 
|  | 3727 | * when there are other idle cpus which are better suited for that job. | 
|  | 3728 | */ | 
|  | 3729 | static int find_new_ilb(int cpu) | 
|  | 3730 | { | 
|  | 3731 | struct sched_domain *sd; | 
|  | 3732 | struct sched_group *ilb_group; | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3733 | int ilb = nr_cpu_ids; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3734 |  | 
|  | 3735 | /* | 
|  | 3736 | * Have idle load balancer selection from semi-idle packages only | 
|  | 3737 | * when power-aware load balancing is enabled | 
|  | 3738 | */ | 
|  | 3739 | if (!(sched_smt_power_savings || sched_mc_power_savings)) | 
|  | 3740 | goto out_done; | 
|  | 3741 |  | 
|  | 3742 | /* | 
|  | 3743 | * Optimize for the case when we have no idle CPUs or only one | 
|  | 3744 | * idle CPU. Don't walk the sched_domain hierarchy in such cases | 
|  | 3745 | */ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3746 | if (cpumask_weight(nohz.idle_cpus_mask) < 2) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3747 | goto out_done; | 
|  | 3748 |  | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3749 | rcu_read_lock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3750 | for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { | 
|  | 3751 | ilb_group = sd->groups; | 
|  | 3752 |  | 
|  | 3753 | do { | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3754 | if (is_semi_idle_group(ilb_group)) { | 
|  | 3755 | ilb = cpumask_first(nohz.grp_idle_mask); | 
|  | 3756 | goto unlock; | 
|  | 3757 | } | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3758 |  | 
|  | 3759 | ilb_group = ilb_group->next; | 
|  | 3760 |  | 
|  | 3761 | } while (ilb_group != sd->groups); | 
|  | 3762 | } | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3763 | unlock: | 
|  | 3764 | rcu_read_unlock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3765 |  | 
|  | 3766 | out_done: | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3767 | return ilb; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3768 | } | 
|  | 3769 | #else /*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ | 
|  | 3770 | static inline int find_new_ilb(int cpu) | 
|  | 3771 | { | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3772 | return nr_cpu_ids; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3773 | } | 
|  | 3774 | #endif | 
|  | 3775 |  | 
|  | 3776 | /* | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3777 | * Kick a CPU to do the nohz balancing, if it is time for it. We pick the | 
|  | 3778 | * nohz.load_balancer CPU (if there is one); otherwise we fall back to the | 
|  | 3779 | * first idle CPU (if there is one). | 
|  | 3780 | */ | 
|  | 3781 | static void nohz_balancer_kick(int cpu) | 
|  | 3782 | { | 
|  | 3783 | int ilb_cpu; | 
|  | 3784 |  | 
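|  |  | /* | 
|  |  | * Bump nohz.next_balance forward a jiffy so that nohz_kick_needed() | 
|  |  | * backs off while this kick is still being processed. | 
|  |  | */ | 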
|  | 3785 | nohz.next_balance++; | 
|  | 3786 |  | 
|  | 3787 | ilb_cpu = get_nohz_load_balancer(); | 
|  | 3788 |  | 
|  | 3789 | if (ilb_cpu >= nr_cpu_ids) { | 
|  | 3790 | ilb_cpu = cpumask_first(nohz.idle_cpus_mask); | 
|  | 3791 | if (ilb_cpu >= nr_cpu_ids) | 
|  | 3792 | return; | 
|  | 3793 | } | 
|  | 3794 |  | 
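|  |  | /* | 
|  |  | * Use this cpu's per-cpu call_single_data to raise SCHED_SOFTIRQ on | 
|  |  | * ilb_cpu, unless a kick is already pending there. | 
|  |  | */ | 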
|  | 3795 | if (!cpu_rq(ilb_cpu)->nohz_balance_kick) { | 
|  | 3796 | struct call_single_data *cp; | 
|  | 3797 |  | 
|  | 3798 | cpu_rq(ilb_cpu)->nohz_balance_kick = 1; | 
|  | 3799 | cp = &per_cpu(remote_sched_softirq_cb, cpu); | 
|  | 3800 | __smp_call_function_single(ilb_cpu, cp, 0); | 
|  | 3801 | } | 
|  | 3802 | return; | 
|  | 3803 | } | 
|  | 3804 |  | 
|  | 3805 | /* | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3806 | * This routine tries to nominate an ilb (idle load balancing) | 
|  | 3807 | * owner among the cpus whose ticks are stopped. The ilb owner will do the | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3808 | * idle load balancing on behalf of all those cpus. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3809 | * | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3810 | * When the ilb owner becomes busy, we will not have a new ilb owner until | 
|  | 3811 | * some idle CPU wakes up and goes back to idle, or some busy CPU tries to | 
|  | 3812 | * kick idle load balancing by kicking one of the idle CPUs. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3813 | * | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3814 | * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this | 
|  | 3815 | * ilb owner CPU in the future (when there is a need for idle load balancing | 
|  | 3816 | * on behalf of all idle CPUs). | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3817 | */ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3818 | void select_nohz_load_balancer(int stop_tick) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3819 | { | 
|  | 3820 | int cpu = smp_processor_id(); | 
|  | 3821 |  | 
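|  |  | /* | 
|  |  | * stop_tick != 0: this cpu is entering tickless idle and volunteers | 
|  |  | * for ilb duty; stop_tick == 0: it is leaving tickless idle and must | 
|  |  | * drop any ilb ownership it holds. | 
|  |  | */ | 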
|  | 3822 | if (stop_tick) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3823 | if (!cpu_active(cpu)) { | 
|  | 3824 | if (atomic_read(&nohz.load_balancer) != cpu) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3825 | return; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3826 |  | 
|  | 3827 | /* | 
|  | 3828 | * If we are going offline and still the leader, | 
|  | 3829 | * give up! | 
|  | 3830 | */ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3831 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, | 
|  | 3832 | nr_cpu_ids) != cpu) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3833 | BUG(); | 
|  | 3834 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3835 | return; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3836 | } | 
|  | 3837 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3838 | cpumask_set_cpu(cpu, nohz.idle_cpus_mask); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3839 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3840 | if (atomic_read(&nohz.first_pick_cpu) == cpu) | 
|  | 3841 | atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids); | 
|  | 3842 | if (atomic_read(&nohz.second_pick_cpu) == cpu) | 
|  | 3843 | atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3844 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3845 | if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3846 | int new_ilb; | 
|  | 3847 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3848 | /* make me the ilb owner */ | 
|  | 3849 | if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids, | 
|  | 3850 | cpu) != nr_cpu_ids) | 
|  | 3851 | return; | 
|  | 3852 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3853 | /* | 
|  | 3854 | * Check to see if there is a more power-efficient | 
|  | 3855 | * ilb. | 
|  | 3856 | */ | 
|  | 3857 | new_ilb = find_new_ilb(cpu); | 
|  | 3858 | if (new_ilb < nr_cpu_ids && new_ilb != cpu) { | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3859 | atomic_set(&nohz.load_balancer, nr_cpu_ids); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3860 | resched_cpu(new_ilb); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3861 | return; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3862 | } | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3863 | return; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3864 | } | 
|  | 3865 | } else { | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3866 | if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) | 
|  | 3867 | return; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3868 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3869 | cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3870 |  | 
|  | 3871 | if (atomic_read(&nohz.load_balancer) == cpu) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3872 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, | 
|  | 3873 | nr_cpu_ids) != cpu) | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3874 | BUG(); | 
|  | 3875 | } | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3876 | return; | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3877 | } | 
|  | 3878 | #endif | 
|  | 3879 |  | 
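|  |  | /* | 
|  |  | * Serializes load balancing of domains that have SD_SERIALIZE set | 
|  |  | * (typically the NUMA domains), so only one cpu balances them at a | 
|  |  | * time; see the need_serialize handling in rebalance_domains(). | 
|  |  | */ | 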
|  | 3880 | static DEFINE_SPINLOCK(balancing); | 
|  | 3881 |  | 
| Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 3882 | static unsigned long __read_mostly max_load_balance_interval = HZ/10; | 
|  | 3883 |  | 
|  | 3884 | /* | 
|  | 3885 | * Scale the max load_balance interval with the number of CPUs in the system. | 
|  | 3886 | * This trades load-balance latency on larger machines for less cross talk. | 
|  | 3887 | */ | 
|  | 3888 | static void update_max_interval(void) | 
|  | 3889 | { | 
|  | 3890 | max_load_balance_interval = HZ*num_online_cpus()/10; | 
|  | 3891 | } | 
|  | 3892 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3893 | /* | 
|  | 3894 | * Check each of this cpu's scheduling domains to see if it is due to | 
|  | 3895 | * be balanced, and initiate a balancing operation if so. | 
|  | 3896 | * | 
|  | 3897 | * Balancing parameters are set up in arch_init_sched_domains. | 
|  | 3898 | */ | 
|  | 3899 | static void rebalance_domains(int cpu, enum cpu_idle_type idle) | 
|  | 3900 | { | 
|  | 3901 | int balance = 1; | 
|  | 3902 | struct rq *rq = cpu_rq(cpu); | 
|  | 3903 | unsigned long interval; | 
|  | 3904 | struct sched_domain *sd; | 
|  | 3905 | /* Earliest time when we have to do rebalance again */ | 
|  | 3906 | unsigned long next_balance = jiffies + 60*HZ; | 
|  | 3907 | int update_next_balance = 0; | 
|  | 3908 | int need_serialize; | 
|  | 3909 |  | 
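|  |  | /* Refresh group-entity shares so the load figures below are current. */ | 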
| Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 3910 | update_shares(cpu); | 
|  | 3911 |  | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3912 | rcu_read_lock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3913 | for_each_domain(cpu, sd) { | 
|  | 3914 | if (!(sd->flags & SD_LOAD_BALANCE)) | 
|  | 3915 | continue; | 
|  | 3916 |  | 
|  | 3917 | interval = sd->balance_interval; | 
|  | 3918 | if (idle != CPU_IDLE) | 
|  | 3919 | interval *= sd->busy_factor; | 
|  | 3920 |  | 
|  | 3921 | /* scale ms to jiffies */ | 
|  | 3922 | interval = msecs_to_jiffies(interval); | 
| Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 3923 | interval = clamp(interval, 1UL, max_load_balance_interval); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3924 |  | 
|  | 3925 | need_serialize = sd->flags & SD_SERIALIZE; | 
|  | 3926 |  | 
|  | 3927 | if (need_serialize) { | 
|  | 3928 | if (!spin_trylock(&balancing)) | 
|  | 3929 | goto out; | 
|  | 3930 | } | 
|  | 3931 |  | 
|  | 3932 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | 
|  | 3933 | if (load_balance(cpu, rq, sd, idle, &balance)) { | 
|  | 3934 | /* | 
|  | 3935 | * We've pulled tasks over so we're no | 
| Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 3936 | * longer idle. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3937 | */ | 
|  | 3938 | idle = CPU_NOT_IDLE; | 
|  | 3939 | } | 
|  | 3940 | sd->last_balance = jiffies; | 
|  | 3941 | } | 
|  | 3942 | if (need_serialize) | 
|  | 3943 | spin_unlock(&balancing); | 
|  | 3944 | out: | 
|  | 3945 | if (time_after(next_balance, sd->last_balance + interval)) { | 
|  | 3946 | next_balance = sd->last_balance + interval; | 
|  | 3947 | update_next_balance = 1; | 
|  | 3948 | } | 
|  | 3949 |  | 
|  | 3950 | /* | 
|  | 3951 | * Stop the load balance at this level. There is another | 
|  | 3952 | * CPU in our sched group which is doing load balancing more | 
|  | 3953 | * actively. | 
|  | 3954 | */ | 
|  | 3955 | if (!balance) | 
|  | 3956 | break; | 
|  | 3957 | } | 
| Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3958 | rcu_read_unlock(); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3959 |  | 
|  | 3960 | /* | 
|  | 3961 | * next_balance will be updated only when there is a need. | 
|  | 3962 | * When the cpu is attached to the null domain, for example, it will | 
|  | 3963 | * not be updated. | 
|  | 3964 | */ | 
|  | 3965 | if (likely(update_next_balance)) | 
|  | 3966 | rq->next_balance = next_balance; | 
|  | 3967 | } | 
|  | 3968 |  | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3969 | #ifdef CONFIG_NO_HZ | 
|  | 3970 | /* | 
|  | 3971 | * In the CONFIG_NO_HZ case, the idle balance kickee will do the | 
|  | 3972 | * rebalancing for all the cpus whose scheduler ticks are stopped. | 
|  | 3973 | */ | 
|  | 3974 | static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) | 
|  | 3975 | { | 
|  | 3976 | struct rq *this_rq = cpu_rq(this_cpu); | 
|  | 3977 | struct rq *rq; | 
|  | 3978 | int balance_cpu; | 
|  | 3979 |  | 
|  | 3980 | if (idle != CPU_IDLE || !this_rq->nohz_balance_kick) | 
|  | 3981 | return; | 
|  | 3982 |  | 
|  | 3983 | for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { | 
|  | 3984 | if (balance_cpu == this_cpu) | 
|  | 3985 | continue; | 
|  | 3986 |  | 
|  | 3987 | /* | 
|  | 3988 | * If this cpu gets work to do, stop the load balancing | 
|  | 3989 | * work being done for other cpus. The next load | 
|  | 3990 | * balancing owner will pick it up. | 
|  | 3991 | */ | 
|  | 3992 | if (need_resched()) { | 
|  | 3993 | this_rq->nohz_balance_kick = 0; | 
|  | 3994 | break; | 
|  | 3995 | } | 
|  | 3996 |  | 
|  | 3997 | raw_spin_lock_irq(&this_rq->lock); | 
| Suresh Siddha | 5343bdb | 2010-07-09 15:19:54 +0200 | [diff] [blame] | 3998 | update_rq_clock(this_rq); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 3999 | update_cpu_load(this_rq); | 
|  | 4000 | raw_spin_unlock_irq(&this_rq->lock); | 
|  | 4001 |  | 
|  | 4002 | rebalance_domains(balance_cpu, CPU_IDLE); | 
|  | 4003 |  | 
|  | 4004 | rq = cpu_rq(balance_cpu); | 
|  | 4005 | if (time_after(this_rq->next_balance, rq->next_balance)) | 
|  | 4006 | this_rq->next_balance = rq->next_balance; | 
|  | 4007 | } | 
|  | 4008 | nohz.next_balance = this_rq->next_balance; | 
|  | 4009 | this_rq->nohz_balance_kick = 0; | 
|  | 4010 | } | 
|  | 4011 |  | 
|  | 4012 | /* | 
|  | 4013 | * Current heuristic for kicking the idle load balancer | 
|  | 4014 | * - first_pick_cpu is one of the busy CPUs. It will kick the | 
|  | 4015 | *   idle load balancer when it has more than one process active. This | 
|  | 4016 | *   eliminates the need for idle load balancing altogether when we have | 
|  | 4017 | *   only one running process in the system (common case). | 
|  | 4018 | * - If there is more than one busy CPU, the idle load balancer may have | 
|  | 4019 | *   to run for active_load_balance to happen (i.e., two busy CPUs are | 
|  | 4020 | *   SMT or core siblings and can run better if they move to different | 
|  | 4021 | *   physical CPUs). So, second_pick_cpu is the second of the busy CPUs, | 
|  | 4022 | *   which will kick the idle load balancer as soon as it has any load. | 
|  | 4023 | */ | 
|  | 4024 | static inline int nohz_kick_needed(struct rq *rq, int cpu) | 
|  | 4025 | { | 
|  | 4026 | unsigned long now = jiffies; | 
|  | 4027 | int ret; | 
|  | 4028 | int first_pick_cpu, second_pick_cpu; | 
|  | 4029 |  | 
|  | 4030 | if (time_before(now, nohz.next_balance)) | 
|  | 4031 | return 0; | 
|  | 4032 |  | 
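|  |  | /* Idle cpus never kick the ilb; they are the kick targets. */ | 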
| Suresh Siddha | f6c3f16 | 2010-09-13 11:02:21 -0700 | [diff] [blame] | 4033 | if (rq->idle_at_tick) | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4034 | return 0; | 
|  | 4035 |  | 
|  | 4036 | first_pick_cpu = atomic_read(&nohz.first_pick_cpu); | 
|  | 4037 | second_pick_cpu = atomic_read(&nohz.second_pick_cpu); | 
|  | 4038 |  | 
|  | 4039 | if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu && | 
|  | 4040 | second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu) | 
|  | 4041 | return 0; | 
|  | 4042 |  | 
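|  |  | /* | 
|  |  | * atomic_cmpxchg() returns the old value: nr_cpu_ids means the slot | 
|  |  | * was free and is now ours, == cpu means we already held it. | 
|  |  | */ | 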
|  | 4043 | ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu); | 
|  | 4044 | if (ret == nr_cpu_ids || ret == cpu) { | 
|  | 4045 | atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids); | 
|  | 4046 | if (rq->nr_running > 1) | 
|  | 4047 | return 1; | 
|  | 4048 | } else { | 
|  | 4049 | ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu); | 
|  | 4050 | if (ret == nr_cpu_ids || ret == cpu) { | 
|  | 4051 | if (rq->nr_running) | 
|  | 4052 | return 1; | 
|  | 4053 | } | 
|  | 4054 | } | 
|  | 4055 | return 0; | 
|  | 4056 | } | 
|  | 4057 | #else | 
|  | 4058 | static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } | 
|  | 4059 | #endif | 
|  | 4060 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4061 | /* | 
|  | 4062 | * run_rebalance_domains is triggered when needed from the scheduler tick. | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4063 | * Also triggered for nohz idle balancing (with nohz_balance_kick set). | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4064 | */ | 
|  | 4065 | static void run_rebalance_domains(struct softirq_action *h) | 
|  | 4066 | { | 
|  | 4067 | int this_cpu = smp_processor_id(); | 
|  | 4068 | struct rq *this_rq = cpu_rq(this_cpu); | 
|  | 4069 | enum cpu_idle_type idle = this_rq->idle_at_tick ? | 
|  | 4070 | CPU_IDLE : CPU_NOT_IDLE; | 
|  | 4071 |  | 
|  | 4072 | rebalance_domains(this_cpu, idle); | 
|  | 4073 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4074 | /* | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4075 | * If this cpu has a pending nohz_balance_kick, then do the | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4076 | * balancing on behalf of the other idle cpus whose ticks are | 
|  | 4077 | * stopped. | 
|  | 4078 | */ | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4079 | nohz_idle_balance(this_cpu, idle); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4080 | } | 
|  | 4081 |  | 
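|  |  | /* | 
|  |  | * True when the cpu is currently attached to the NULL scheduling | 
|  |  | * domain (e.g. an isolated cpu) and thus takes no part in balancing. | 
|  |  | */ | 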
|  | 4082 | static inline int on_null_domain(int cpu) | 
|  | 4083 | { | 
| Paul E. McKenney | 90a6501 | 2010-02-28 08:32:18 -0800 | [diff] [blame] | 4084 | return !rcu_dereference_sched(cpu_rq(cpu)->sd); | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4085 | } | 
|  | 4086 |  | 
|  | 4087 | /* | 
|  | 4088 | * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4089 | */ | 
|  | 4090 | static inline void trigger_load_balance(struct rq *rq, int cpu) | 
|  | 4091 | { | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4092 | /* Don't need to rebalance while attached to NULL domain */ | 
|  | 4093 | if (time_after_eq(jiffies, rq->next_balance) && | 
|  | 4094 | likely(!on_null_domain(cpu))) | 
|  | 4095 | raise_softirq(SCHED_SOFTIRQ); | 
| Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4096 | #ifdef CONFIG_NO_HZ | 
|  | 4097 | else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu))) | 
|  | 4098 | nohz_balancer_kick(cpu); | 
|  | 4099 | #endif | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4100 | } | 
|  | 4101 |  | 
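|  |  | /* | 
|  |  | * A runqueue came online or went offline: have update_sysctl() rescale | 
|  |  | * the latency/granularity tunables for the new online cpu count. | 
|  |  | */ | 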
| Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 4102 | static void rq_online_fair(struct rq *rq) | 
|  | 4103 | { | 
|  | 4104 | update_sysctl(); | 
|  | 4105 | } | 
|  | 4106 |  | 
|  | 4107 | static void rq_offline_fair(struct rq *rq) | 
|  | 4108 | { | 
|  | 4109 | update_sysctl(); | 
|  | 4110 | } | 
|  | 4111 |  | 
| Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4112 | #else	/* CONFIG_SMP */ | 
|  | 4113 |  | 
|  | 4114 | /* | 
|  | 4115 | * on UP we do not need to balance between CPUs: | 
|  | 4116 | */ | 
|  | 4117 | static inline void idle_balance(int cpu, struct rq *rq) | 
|  | 4118 | { | 
|  | 4119 | } | 
|  | 4120 |  | 
| Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 4121 | #endif /* CONFIG_SMP */ | 
| Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 4122 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4123 | /* | 
|  | 4124 | * scheduler tick hitting a task of our scheduling class: | 
|  | 4125 | */ | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4126 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4127 | { | 
|  | 4128 | struct cfs_rq *cfs_rq; | 
|  | 4129 | struct sched_entity *se = &curr->se; | 
|  | 4130 |  | 
|  | 4131 | for_each_sched_entity(se) { | 
|  | 4132 | cfs_rq = cfs_rq_of(se); | 
| Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4133 | entity_tick(cfs_rq, se, queued); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4134 | } | 
|  | 4135 | } | 
|  | 4136 |  | 
|  | 4137 | /* | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4138 | * called on fork with the child task as argument from the parent's context | 
|  | 4139 | *  - child not yet on the tasklist | 
|  | 4140 | *  - preemption disabled | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4141 | */ | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4142 | static void task_fork_fair(struct task_struct *p) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4143 | { | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4144 | struct cfs_rq *cfs_rq = task_cfs_rq(current); | 
| Ingo Molnar | 429d43bc | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 4145 | struct sched_entity *se = &p->se, *curr = cfs_rq->curr; | 
| Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4146 | int this_cpu = smp_processor_id(); | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4147 | struct rq *rq = this_rq(); | 
|  | 4148 | unsigned long flags; | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4149 |  | 
| Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 4150 | raw_spin_lock_irqsave(&rq->lock, flags); | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4151 |  | 
| Peter Zijlstra | 861d034 | 2010-08-19 13:31:43 +0200 | [diff] [blame] | 4152 | update_rq_clock(rq); | 
|  | 4153 |  | 
| Paul E. McKenney | b0a0f66 | 2010-10-06 17:32:51 -0700 | [diff] [blame] | 4154 | if (unlikely(task_cpu(p) != this_cpu)) { | 
|  | 4155 | rcu_read_lock(); | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4156 | __set_task_cpu(p, this_cpu); | 
| Paul E. McKenney | b0a0f66 | 2010-10-06 17:32:51 -0700 | [diff] [blame] | 4157 | rcu_read_unlock(); | 
|  | 4158 | } | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4159 |  | 
| Ting Yang | 7109c44 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 4160 | update_curr(cfs_rq); | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4161 |  | 
| Mike Galbraith | b5d9d73 | 2009-09-08 11:12:28 +0200 | [diff] [blame] | 4162 | if (curr) | 
|  | 4163 | se->vruntime = curr->vruntime; | 
| Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 4164 | place_entity(cfs_rq, se, 1); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 4165 |  | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4166 | if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { | 
| Dmitry Adamushko | 87fefa3 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4167 | /* | 
| Ingo Molnar | edcb60a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4168 | * Upon rescheduling, sched_class::put_prev_task() will place | 
|  | 4169 | * 'current' within the tree based on its new key value. | 
|  | 4170 | */ | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 4171 | swap(curr->vruntime, se->vruntime); | 
| Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 4172 | resched_task(rq->curr); | 
| Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 4173 | } | 
|  | 4174 |  | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 4175 | se->vruntime -= cfs_rq->min_vruntime; | 
|  | 4176 |  | 
| Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 4177 | raw_spin_unlock_irqrestore(&rq->lock, flags); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4178 | } | 
|  | 4179 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4180 | /* | 
|  | 4181 | * Priority of the task has changed. Check to see if we preempt | 
|  | 4182 | * the current task. | 
|  | 4183 | */ | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 4184 | static void | 
|  | 4185 | prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4186 | { | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 4187 | if (!p->se.on_rq) | 
|  | 4188 | return; | 
|  | 4189 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4190 | /* | 
|  | 4191 | * Reschedule if we are currently running on this runqueue and | 
|  | 4192 | * our priority decreased, or if we are not currently running on | 
|  | 4193 | * this runqueue and our priority is higher than the current task's. | 
|  | 4194 | */ | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 4195 | if (rq->curr == p) { | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4196 | if (p->prio > oldprio) | 
|  | 4197 | resched_task(rq->curr); | 
|  | 4198 | } else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 4199 | check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4200 | } | 
|  | 4201 |  | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 4202 | static void switched_from_fair(struct rq *rq, struct task_struct *p) | 
|  | 4203 | { | 
|  | 4204 | struct sched_entity *se = &p->se; | 
|  | 4205 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 
|  | 4206 |  | 
|  | 4207 | /* | 
|  | 4208 | * Ensure the task's vruntime is normalized, so that when its | 
|  | 4209 | * switched back to the fair class the enqueue_entity(.flags=0) will | 
|  | 4210 | * do the right thing. | 
|  | 4211 | * | 
|  | 4212 | * If it was on_rq, then the dequeue_entity(.flags=0) will already | 
|  | 4213 | * have normalized the vruntime, if it was !on_rq, then only when | 
|  | 4214 | * the task is sleeping will it still have non-normalized vruntime. | 
|  | 4215 | */ | 
|  | 4216 | if (!se->on_rq && p->state != TASK_RUNNING) { | 
|  | 4217 | /* | 
|  | 4218 | * Fix up our vruntime so that the current sleep doesn't | 
|  | 4219 | * cause 'unlimited' sleep bonus. | 
|  | 4220 | */ | 
|  | 4221 | place_entity(cfs_rq, se, 0); | 
|  | 4222 | se->vruntime -= cfs_rq->min_vruntime; | 
|  | 4223 | } | 
|  | 4224 | } | 
|  | 4225 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4226 | /* | 
|  | 4227 | * We switched to the sched_fair class. | 
|  | 4228 | */ | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 4229 | static void switched_to_fair(struct rq *rq, struct task_struct *p) | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4230 | { | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 4231 | if (!p->se.on_rq) | 
|  | 4232 | return; | 
|  | 4233 |  | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4234 | /* | 
|  | 4235 | * We were most likely switched from sched_rt, so | 
|  | 4236 | * kick off a reschedule if we are running; otherwise just see | 
|  | 4237 | * if we can still preempt the current task. | 
|  | 4238 | */ | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 4239 | if (rq->curr == p) | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4240 | resched_task(rq->curr); | 
|  | 4241 | else | 
| Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 4242 | check_preempt_curr(rq, p, 0); | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4243 | } | 
|  | 4244 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4245 | /* | 
|  |  | * Account for a task changing its policy or group. | 
|  | 4246 | * | 
|  | 4247 | * This routine is mostly called to set cfs_rq->curr field when a task | 
|  | 4248 | * migrates between groups/classes. | 
|  | 4249 | */ | 
|  | 4250 | static void set_curr_task_fair(struct rq *rq) | 
|  | 4251 | { | 
|  | 4252 | struct sched_entity *se = &rq->curr->se; | 
|  | 4253 |  | 
|  | 4254 | for_each_sched_entity(se) | 
|  | 4255 | set_next_entity(cfs_rq_of(se), se); | 
|  | 4256 | } | 
|  | 4257 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 4258 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 4259 | static void task_move_group_fair(struct task_struct *p, int on_rq) | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 4260 | { | 
| Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 4261 | /* | 
|  | 4262 | * If the task was not on the rq at the time of this cgroup movement | 
|  | 4263 | * it must have been asleep; sleeping tasks keep their ->vruntime | 
|  | 4264 | * absolute on their old rq until wakeup (needed for the fair sleeper | 
|  | 4265 | * bonus in place_entity()). | 
|  | 4266 | * | 
|  | 4267 | * If it was on the rq, we've just 'preempted' it, which does convert | 
|  | 4268 | * ->vruntime to a relative base. | 
|  | 4269 | * | 
|  | 4270 | * Make sure both cases convert their relative position when migrating | 
|  | 4271 | * to another cgroup's rq. This does somewhat interfere with the | 
|  | 4272 | * fair sleeper stuff for the first placement, but who cares. | 
|  | 4273 | */ | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 4274 | if (!on_rq) | 
| Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 4275 | p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; | 
|  | 4276 | set_task_rq(p, task_cpu(p)); | 
|  | 4277 | if (!on_rq) | 
|  | 4278 | p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime; | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 4279 | } | 
|  | 4280 | #endif | 
|  | 4281 |  | 
| H Hartley Sweeten | 6d686f4 | 2010-01-13 20:21:52 -0700 | [diff] [blame] | 4282 | static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 4283 | { | 
|  | 4284 | struct sched_entity *se = &task->se; | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 4285 | unsigned int rr_interval = 0; | 
|  | 4286 |  | 
|  | 4287 | /* | 
|  | 4288 | * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise | 
|  | 4289 | * idle runqueue: | 
|  | 4290 | */ | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 4291 | if (rq->cfs.load.weight) | 
|  | 4292 | rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 4293 |  | 
|  | 4294 | return rr_interval; | 
|  | 4295 | } | 
|  | 4296 |  | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4297 | /* | 
|  | 4298 | * All the scheduling class methods: | 
|  | 4299 | */ | 
| Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 4300 | static const struct sched_class fair_sched_class = { | 
|  | 4301 | .next			= &idle_sched_class, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4302 | .enqueue_task		= enqueue_task_fair, | 
|  | 4303 | .dequeue_task		= dequeue_task_fair, | 
|  | 4304 | .yield_task		= yield_task_fair, | 
| Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 4305 | .yield_to_task		= yield_to_task_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4306 |  | 
| Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 4307 | .check_preempt_curr	= check_preempt_wakeup, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4308 |  | 
|  | 4309 | .pick_next_task		= pick_next_task_fair, | 
|  | 4310 | .put_prev_task		= put_prev_task_fair, | 
|  | 4311 |  | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 4312 | #ifdef CONFIG_SMP | 
| Li Zefan | 4ce72a2 | 2008-10-22 15:25:26 +0800 | [diff] [blame] | 4313 | .select_task_rq		= select_task_rq_fair, | 
|  | 4314 |  | 
| Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 4315 | .rq_online		= rq_online_fair, | 
|  | 4316 | .rq_offline		= rq_offline_fair, | 
| Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 4317 |  | 
|  | 4318 | .task_waking		= task_waking_fair, | 
| Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 4319 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4320 |  | 
| Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4321 | .set_curr_task          = set_curr_task_fair, | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4322 | .task_tick		= task_tick_fair, | 
| Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 4323 | .task_fork		= task_fork_fair, | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4324 |  | 
|  | 4325 | .prio_changed		= prio_changed_fair, | 
| Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 4326 | .switched_from		= switched_from_fair, | 
| Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4327 | .switched_to		= switched_to_fair, | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 4328 |  | 
| Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 4329 | .get_rr_interval	= get_rr_interval_fair, | 
|  | 4330 |  | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 4331 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 4332 | .task_move_group	= task_move_group_fair, | 
| Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 4333 | #endif | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4334 | }; | 
|  | 4335 |  | 
|  | 4336 | #ifdef CONFIG_SCHED_DEBUG | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 4337 | static void print_cfs_stats(struct seq_file *m, int cpu) | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4338 | { | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4339 | struct cfs_rq *cfs_rq; | 
|  | 4340 |  | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 4341 | rcu_read_lock(); | 
| Ingo Molnar | c3b64f1 | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 4342 | for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) | 
| Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 4343 | print_cfs_rq(m, cpu, cfs_rq); | 
| Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 4344 | rcu_read_unlock(); | 
| Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 4345 | } | 
|  | 4346 | #endif |