/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
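
/*
 * Illustrative example (editor's note, not part of the original source):
 * with the default SCHED_TUNABLESCALING_LOG policy on an 8-CPU machine the
 * factor is 1 + ilog2(8) = 4, so update_sysctl() below produces
 *   sysctl_sched_latency            = 4 * 6ms    = 24ms
 *   sysctl_sched_min_granularity    = 4 * 0.75ms = 3ms
 *   sysctl_sched_wakeup_granularity = 4 * 1ms    = 4ms
 * Machines with more than 8 online CPUs are clamped to the same factor.
 */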

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
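
/*
 * Illustrative example (editor's note, not part of the original source,
 * ignoring SCHED_LOAD_RESOLUTION scaling): for a NICE_0_LOAD (1024) entity
 * on a queue whose total lw->weight is 2048,
 *   inv_weight = 2^32 / 2048 and
 *   delta_exec * 1024 * inv_weight >> 32  ~=  delta_exec / 2,
 * i.e. the entity is credited with half of the elapsed wall time, which is
 * exactly delta *= weight / lw up to ~2^-32 fixed-point rounding.
 */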


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
				       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
		/* We should have no load, but we need to update last_decay. */
		update_cfs_rq_blocked_load(cfs_rq, 0);
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities that
	 * are in the same cfs_rq, i.e. that share a common parent. Walk up the
	 * hierarchy of both tasks until we find ancestors that are siblings
	 * of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
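
/*
 * Illustrative note (editor's note, not part of the original source):
 * vruntime comparisons are done on the signed difference so they remain
 * correct across u64 wraparound. E.g. with a = 5 and b = ULLONG_MAX - 2,
 * (s64)(a - b) = 8 > 0, so a is correctly seen as "after" b even though
 * a < b as plain unsigned values.
 */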

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
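
/*
 * Illustrative example (editor's note, not part of the original source),
 * using the unscaled defaults (latency = 6ms, min_granularity = 0.75ms,
 * sched_nr_latency = 8): with up to 8 runnable tasks the period stays 6ms;
 * with 16 runnable tasks it is stretched to 16 * 0.75ms = 12ms, so no slice
 * ever shrinks below the minimum granularity.
 */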

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
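
/*
 * Illustrative example (editor's note, not part of the original source):
 * with a 6ms period and three runnable nice-0 tasks (equal weight 1024 each),
 * every task's wall-time slice is 6ms * (1024 / 3072) = 2ms. If one task
 * instead carries twice the weight of each sibling (2048 vs. 1024 + 1024),
 * its slice becomes 6ms * (2048 / 4096) = 3ms.
 */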

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static inline void __update_task_entity_contrib(struct sched_entity *se);

/* Give a new task initial runnable-average values so its load weighs fully during its infancy */
void init_task_runnable_average(struct task_struct *p)
{
	u32 slice;

	p->se.avg.decay_count = 0;
	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
	p->se.avg.runnable_avg_sum = slice;
	p->se.avg.runnable_avg_period = slice;
	__update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;
unsigned int sysctl_numa_balancing_scan_period_reset = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages.
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned int smin = task_scan_min(p);
	unsigned int smax;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
	return max(smin, smax);
}
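
/*
 * Illustrative example (editor's note, not part of the original source):
 * for a task with 1GB resident and the default 256MB scan size,
 * task_nr_scan_windows() returns 4, so
 *   task_scan_min() = max(1000ms / (2560/256), 1000ms / 4) = 250ms
 *   task_scan_max() = 60000ms / 4 = 15000ms
 * i.e. larger tasks are scanned more often, bounded by the floor above.
 */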

/*
 * Once a preferred node is selected the scheduler balancer will prefer moving
 * a task to that node for sysctl_numa_balancing_settle_count number of PTE
 * scans. This will give the process the chance to accumulate more faults on
 * the preferred node but still allow the scheduler to move the task again if
 * the node's CPUs are overloaded.
 */
unsigned int sysctl_numa_balancing_settle_count __read_mostly = 3;

static void task_numa_placement(struct task_struct *p)
{
	int seq, nid, max_nid = -1;
	unsigned long max_faults = 0;

	if (!p->mm)	/* for example, ksmd faulting in a user's mm */
		return;
	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
	if (p->numa_scan_seq == seq)
		return;
	p->numa_scan_seq = seq;
	p->numa_migrate_seq++;
	p->numa_scan_period_max = task_scan_max(p);

	/* Find the node with the highest number of faults */
	for_each_online_node(nid) {
		unsigned long faults;

		/* Decay existing window and copy faults since last scan */
		p->numa_faults[nid] >>= 1;
		p->numa_faults[nid] += p->numa_faults_buffer[nid];
		p->numa_faults_buffer[nid] = 0;

		faults = p->numa_faults[nid];
		if (faults > max_faults) {
			max_faults = faults;
			max_nid = nid;
		}
	}

	/* Update the task's preferred node if necessary */
	if (max_faults && max_nid != p->numa_preferred_nid) {
		p->numa_preferred_nid = max_nid;
		p->numa_migrate_seq = 0;
	}
}

/*
 * Got a PROT_NONE fault for a page on @node.
 */
void task_numa_fault(int node, int pages, bool migrated)
{
	struct task_struct *p = current;

	if (!numabalancing_enabled)
		return;

	/* Allocate buffer to track faults on a per-node basis */
	if (unlikely(!p->numa_faults)) {
		int size = sizeof(*p->numa_faults) * nr_node_ids;

		/* numa_faults and numa_faults_buffer share the allocation */
		p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
		if (!p->numa_faults)
			return;

		BUG_ON(p->numa_faults_buffer);
		p->numa_faults_buffer = p->numa_faults + nr_node_ids;
	}

	/*
	 * If pages are properly placed (did not migrate) then scan slower.
	 * This is reset periodically in case of phase changes.
	 */
	if (!migrated) {
		/* Initialise if necessary */
		if (!p->numa_scan_period_max)
			p->numa_scan_period_max = task_scan_max(p);

		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period + 10);
	}

	task_numa_placement(p);

	p->numa_faults_buffer[node] += pages;
}

static void reset_ptenuma_scan(struct task_struct *p)
{
	ACCESS_ONCE(p->mm->numa_scan_seq)++;
	p->mm->numa_scan_offset = 0;
}

/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
void task_numa_work(struct callback_head *work)
{
	unsigned long migrate, next_scan, now = jiffies;
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	struct vm_area_struct *vma;
	unsigned long start, end;
	unsigned long nr_pte_updates = 0;
	long pages;

	WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));

	work->next = work; /* protect against double add */
	/*
	 * Who cares about NUMA placement when they're dying.
	 *
	 * NOTE: make sure not to dereference p->mm before this check,
	 * exit_task_work() happens _after_ exit_mm() so we could be called
	 * without p->mm even though we still had it when we enqueued this
	 * work.
	 */
	if (p->flags & PF_EXITING)
		return;

	if (!mm->numa_next_reset || !mm->numa_next_scan) {
		mm->numa_next_scan = now +
			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
		mm->numa_next_reset = now +
			msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
	}

	/*
	 * Reset the scan period if enough time has gone by. Objective is that
	 * scanning will be reduced if pages are properly placed. As tasks
	 * can enter different phases this needs to be re-examined. Lacking
	 * proper tracking of reference behaviour, this blunt hammer is used.
	 */
	migrate = mm->numa_next_reset;
	if (time_after(now, migrate)) {
		p->numa_scan_period = task_scan_min(p);
		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
		xchg(&mm->numa_next_reset, next_scan);
	}

	/*
	 * Enforce maximal scan/migration frequency..
	 */
	migrate = mm->numa_next_scan;
	if (time_before(now, migrate))
		return;

	if (p->numa_scan_period == 0) {
		p->numa_scan_period_max = task_scan_max(p);
		p->numa_scan_period = task_scan_min(p);
	}

	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
		return;

	/*
	 * Delay this task enough that another task of this mm will likely win
	 * the next time around.
	 */
	p->node_stamp += 2 * TICK_NSEC;

	start = mm->numa_scan_offset;
	pages = sysctl_numa_balancing_scan_size;
	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
	if (!pages)
		return;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (!vma) {
		reset_ptenuma_scan(p);
		start = 0;
		vma = mm->mmap;
	}
	for (; vma; vma = vma->vm_next) {
		if (!vma_migratable(vma))
			continue;

		/* Skip small VMAs. They are not likely to be of relevance */
		if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
			continue;

		do {
			start = max(start, vma->vm_start);
			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
			end = min(end, vma->vm_end);
			nr_pte_updates += change_prot_numa(vma, start, end);

			/*
			 * Scan sysctl_numa_balancing_scan_size but ensure that
			 * at least one PTE is updated so that unused virtual
			 * address space is quickly skipped.
			 */
			if (nr_pte_updates)
				pages -= (end - start) >> PAGE_SHIFT;

			start = end;
			if (pages <= 0)
				goto out;
		} while (end != vma->vm_end);
	}

out:
	/*
	 * If the whole process was scanned without updates then no NUMA
	 * hinting faults are being recorded and scan rate should be lower.
	 */
	if (mm->numa_scan_offset == 0 && !nr_pte_updates) {
		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period << 1);

		next_scan = now + msecs_to_jiffies(p->numa_scan_period);
		mm->numa_next_scan = next_scan;
	}

	/*
	 * It is possible to reach the end of the VMA list but the last few
	 * VMAs are not guaranteed to be vma_migratable(). If they are not, we
	 * would find the !migratable VMA on the next scan but not reset the
	 * scanner to the start so check it now.
	 */
	if (vma)
		mm->numa_scan_offset = start;
	else
		reset_ptenuma_scan(p);
	up_read(&mm->mmap_sem);
}
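
/*
 * Illustrative example (editor's note, not part of the original source):
 * with 4KB pages (PAGE_SHIFT = 12) and the default 256MB scan size, pages
 * becomes 256 << (20 - 12) = 65536 PTEs per scan pass, and each pass runs
 * at most once per p->numa_scan_period milliseconds.
 */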

/*
 * Drive the periodic memory faults..
 */
void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->numa_work;
	u64 period, now;

	/*
	 * We don't care about NUMA placement if we don't have memory.
	 */
	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
		return;

	/*
	 * Using runtime rather than walltime has the dual advantage that
	 * we (mostly) drive the selection from busy threads and that the
	 * task needs to have done some actual work before we bother with
	 * NUMA placement.
	 */
	now = curr->se.sum_exec_runtime;
	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;

	if (now - curr->node_stamp > period) {
		if (!curr->node_stamp)
			curr->numa_scan_period = task_scan_min(curr);
		curr->node_stamp += period;

		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
			task_work_add(curr, work, true);
		}
	}
}
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
#endif /* CONFIG_NUMA_BALANCING */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se))
		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
#endif
	cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
	if (entity_is_task(se))
		list_del_init(&se->group_node);
	cfs_rq->nr_running--;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
	long tg_weight;

	/*
	 * Use this CPU's actual weight instead of the last load_contribution
	 * to gain a more accurate current total weight. See
	 * update_cfs_rq_load_contribution().
	 */
	tg_weight = atomic_long_read(&tg->load_avg);
	tg_weight -= cfs_rq->tg_load_contrib;
	tg_weight += cfs_rq->load.weight;

	return tg_weight;
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long tg_weight, load, shares;

	tg_weight = calc_tg_weight(tg, cfs_rq);
	load = cfs_rq->load.weight;

	shares = (tg->shares * load);
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}
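
/*
 * Illustrative example (editor's note, not part of the original source):
 * a group with tg->shares = 1024 whose runnable load is split 75%/25%
 * between two CPUs gets per-CPU entity weights of roughly 768 and 256, so
 * the group as a whole still competes with about the weight of a single
 * nice-0 task.
 */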
# else /* CONFIG_SMP */
static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	return tg->shares;
}
# endif /* CONFIG_SMP */
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		account_entity_dequeue(cfs_rq, se);
	}

	update_load_set(&se->load, weight);

	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	struct task_group *tg;
	struct sched_entity *se;
	long shares;

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
	if (!se || throttled_hierarchy(cfs_rq))
		return;
#ifndef CONFIG_SMP
	if (likely(se->load.weight == tg->shares))
		return;
#endif
	shares = calc_cfs_shares(cfs_rq, tg);

	reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables below are dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */

/* Precomputed fixed inverse multiplies for multiplication by y^n */
static const u32 runnable_avg_yN_inv[] = {
	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
	0x85aac367, 0x82cd8698,
};

/*
 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
 * over-estimates when re-combining.
 */
static const u32 runnable_avg_yN_sum[] = {
	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
};

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static __always_inline u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (!n)
		return val;
	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
	 *    With a look-up table which covers k^n (n<PERIOD)
	 *
	 * To achieve constant time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val *= runnable_avg_yN_inv[local_n];
	/* We don't use SRR here since we always want to round down. */
	return val >> 32;
}

/*
 * For updates fully spanning n periods, the contribution to runnable
 * average will be: \Sum 1024*y^n
 *
 * We can compute this reasonably efficiently by combining:
 *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
 */
static u32 __compute_runnable_contrib(u64 n)
{
	u32 contrib = 0;

	if (likely(n <= LOAD_AVG_PERIOD))
		return runnable_avg_yN_sum[n];
	else if (unlikely(n >= LOAD_AVG_MAX_N))
		return LOAD_AVG_MAX;

	/* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
	do {
		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];

		n -= LOAD_AVG_PERIOD;
	} while (n > LOAD_AVG_PERIOD);

	contrib = decay_load(contrib, n);
	return contrib + runnable_avg_yN_sum[n];
}
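
/*
 * Illustrative example (editor's note, not part of the original source):
 * decay_load(val, 32) halves val (one full half-life, modulo a fixed-point
 * rounding error of at most 1), and decay_load(1024, 16) is
 * 1024 * 0xb504f333 >> 32 = 724, i.e. 1024 * 2^(-16/32) rounded down.
 * __compute_runnable_contrib(64) works out to roughly 35000 and converges
 * towards LOAD_AVG_MAX (47742) as the number of runnable periods grows.
 */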
1347
1348/*
1349 * We can represent the historical contribution to runnable average as the
1350 * coefficients of a geometric series. To do this we sub-divide our runnable
1351 * history into segments of approximately 1ms (1024us); label the segment that
1352 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1353 *
1354 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1355 * p0 p1 p2
1356 * (now) (~1ms ago) (~2ms ago)
1357 *
1358 * Let u_i denote the fraction of p_i that the entity was runnable.
1359 *
1360 * We then designate the fractions u_i as our co-efficients, yielding the
1361 * following representation of historical load:
1362 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1363 *
1364 * We choose y based on the with of a reasonably scheduling period, fixing:
1365 * y^32 = 0.5
1366 *
1367 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1368 * approximately half as much as the contribution to load within the last ms
1369 * (u_0).
1370 *
1371 * When a period "rolls over" and we have new u_0`, multiplying the previous
1372 * sum again by y is sufficient to update:
1373 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1374 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1375 */
1376static __always_inline int __update_entity_runnable_avg(u64 now,
1377 struct sched_avg *sa,
1378 int runnable)
1379{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001380 u64 delta, periods;
1381 u32 runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001382 int delta_w, decayed = 0;
1383
1384 delta = now - sa->last_runnable_update;
1385 /*
1386 * This should only happen when time goes backwards, which it
1387 * unfortunately does during sched clock init when we swap over to TSC.
1388 */
1389 if ((s64)delta < 0) {
1390 sa->last_runnable_update = now;
1391 return 0;
1392 }
1393
1394 /*
1395 * Use 1024ns as the unit of measurement since it's a reasonable
1396 * approximation of 1us and fast to compute.
1397 */
1398 delta >>= 10;
1399 if (!delta)
1400 return 0;
1401 sa->last_runnable_update = now;
1402
1403 /* delta_w is the amount already accumulated against our next period */
1404 delta_w = sa->runnable_avg_period % 1024;
1405 if (delta + delta_w >= 1024) {
1406 /* period roll-over */
1407 decayed = 1;
1408
1409 /*
1410 * Now that we know we're crossing a period boundary, figure
1411 * out how much from delta we need to complete the current
1412 * period and accrue it.
1413 */
1414 delta_w = 1024 - delta_w;
Paul Turner5b51f2f2012-10-04 13:18:32 +02001415 if (runnable)
1416 sa->runnable_avg_sum += delta_w;
1417 sa->runnable_avg_period += delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001418
Paul Turner5b51f2f2012-10-04 13:18:32 +02001419 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001420
Paul Turner5b51f2f2012-10-04 13:18:32 +02001421 /* Figure out how many additional periods this update spans */
1422 periods = delta / 1024;
1423 delta %= 1024;
1424
1425 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1426 periods + 1);
1427 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1428 periods + 1);
1429
1430 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
1431 runnable_contrib = __compute_runnable_contrib(periods);
1432 if (runnable)
1433 sa->runnable_avg_sum += runnable_contrib;
1434 sa->runnable_avg_period += runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001435 }
1436
1437 /* Remainder of delta accrued against u_0` */
1438 if (runnable)
1439 sa->runnable_avg_sum += delta;
1440 sa->runnable_avg_period += delta;
1441
1442 return decayed;
1443}
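
/*
 * Worked example (informal): suppose 300us are already accrued against the
 * current 1024us period and a further 3000us of runnable time is accounted.
 * Then:
 *
 *   delta_w   = 1024 - 300 = 724   completes the current period
 *   periods   = 2276 / 1024 = 2    full periods are spanned
 *   remainder = 2276 % 1024 = 228  accrues against the new u_0`
 *
 * and the previously accumulated sums are decayed by y^(periods + 1).
 */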
1444
Paul Turner9ee474f2012-10-04 13:18:30 +02001445/* Synchronize an entity's decay with its parenting cfs_rq. */
Paul Turneraff3e492012-10-04 13:18:30 +02001446static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02001447{
1448 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1449 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1450
1451 decays -= se->avg.decay_count;
1452 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02001453 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02001454
1455 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1456 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02001457
1458 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02001459}
1460
Paul Turnerc566e8e2012-10-04 13:18:30 +02001461#ifdef CONFIG_FAIR_GROUP_SCHED
1462static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1463 int force_update)
1464{
1465 struct task_group *tg = cfs_rq->tg;
Alex Shibf5b9862013-06-20 10:18:54 +08001466 long tg_contrib;
Paul Turnerc566e8e2012-10-04 13:18:30 +02001467
1468 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1469 tg_contrib -= cfs_rq->tg_load_contrib;
1470
Alex Shibf5b9862013-06-20 10:18:54 +08001471 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1472 atomic_long_add(tg_contrib, &tg->load_avg);
Paul Turnerc566e8e2012-10-04 13:18:30 +02001473 cfs_rq->tg_load_contrib += tg_contrib;
1474 }
1475}
Paul Turner8165e142012-10-04 13:18:31 +02001476
Paul Turnerbb17f652012-10-04 13:18:31 +02001477/*
1478 * Aggregate cfs_rq runnable averages into an equivalent task_group
1479 * representation for computing load contributions.
1480 */
1481static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1482 struct cfs_rq *cfs_rq)
1483{
1484 struct task_group *tg = cfs_rq->tg;
1485 long contrib;
1486
1487 /* The fraction of a cpu used by this cfs_rq */
1488 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1489 sa->runnable_avg_period + 1);
1490 contrib -= cfs_rq->tg_runnable_contrib;
1491
1492 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1493 atomic_add(contrib, &tg->runnable_avg);
1494 cfs_rq->tg_runnable_contrib += contrib;
1495 }
1496}
1497
Paul Turner8165e142012-10-04 13:18:31 +02001498static inline void __update_group_entity_contrib(struct sched_entity *se)
1499{
1500 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1501 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02001502 int runnable_avg;
1503
Paul Turner8165e142012-10-04 13:18:31 +02001504 u64 contrib;
1505
1506 contrib = cfs_rq->tg_load_contrib * tg->shares;
Alex Shibf5b9862013-06-20 10:18:54 +08001507 se->avg.load_avg_contrib = div_u64(contrib,
1508 atomic_long_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02001509
1510 /*
1511 * For group entities we need to compute a correction term in the case
1512 * that they are consuming <1 cpu so that we would contribute the same
1513 * load as a task of equal weight.
1514 *
1515 * Explicitly co-ordinating this measurement would be expensive, but
 1516	 * fortunately the sum of each cpu's contribution forms a usable
1517 * lower-bound on the true value.
1518 *
1519 * Consider the aggregate of 2 contributions. Either they are disjoint
 1520	 * (and the sum represents the true value) or they overlap and we are
1521 * understating by the aggregate of their overlap.
1522 *
1523 * Extending this to N cpus, for a given overlap, the maximum amount we
 1524	 * understate by is then n_i(n_i+1)/2 * w_i where n_i is the number of
1525 * cpus that overlap for this interval and w_i is the interval width.
1526 *
1527 * On a small machine; the first term is well-bounded which bounds the
1528 * total error since w_i is a subset of the period. Whereas on a
 1529	 * larger machine, while this first term can be larger, if w_i is of
 1530	 * consequential size we are guaranteed to see n_i*w_i quickly converge to
1531 * our upper bound of 1-cpu.
1532 */
1533 runnable_avg = atomic_read(&tg->runnable_avg);
1534 if (runnable_avg < NICE_0_LOAD) {
1535 se->avg.load_avg_contrib *= runnable_avg;
1536 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1537 }
Paul Turner8165e142012-10-04 13:18:31 +02001538}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001539#else
1540static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1541 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02001542static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1543 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02001544static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001545#endif
1546
Paul Turner8165e142012-10-04 13:18:31 +02001547static inline void __update_task_entity_contrib(struct sched_entity *se)
1548{
1549 u32 contrib;
1550
1551 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1552 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1553 contrib /= (se->avg.runnable_avg_period + 1);
1554 se->avg.load_avg_contrib = scale_load(contrib);
1555}
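
/*
 * Example (illustrative, not normative): a nice-0 task that has been
 * runnable roughly half of the time has
 * runnable_avg_sum ~= runnable_avg_period / 2, so its load_avg_contrib
 * works out to roughly half of the nice-0 weight, i.e. half a CPU's
 * worth of nice-0 load.
 */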
1556
Paul Turner2dac7542012-10-04 13:18:30 +02001557/* Compute the current contribution to load_avg by se, return any delta */
1558static long __update_entity_load_avg_contrib(struct sched_entity *se)
1559{
1560 long old_contrib = se->avg.load_avg_contrib;
1561
Paul Turner8165e142012-10-04 13:18:31 +02001562 if (entity_is_task(se)) {
1563 __update_task_entity_contrib(se);
1564 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02001565 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02001566 __update_group_entity_contrib(se);
1567 }
Paul Turner2dac7542012-10-04 13:18:30 +02001568
1569 return se->avg.load_avg_contrib - old_contrib;
1570}
1571
Paul Turner9ee474f2012-10-04 13:18:30 +02001572static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1573 long load_contrib)
1574{
1575 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1576 cfs_rq->blocked_load_avg -= load_contrib;
1577 else
1578 cfs_rq->blocked_load_avg = 0;
1579}
1580
Paul Turnerf1b17282012-10-04 13:18:31 +02001581static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1582
Paul Turner9d85f212012-10-04 13:18:29 +02001583/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02001584static inline void update_entity_load_avg(struct sched_entity *se,
1585 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02001586{
Paul Turner2dac7542012-10-04 13:18:30 +02001587 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1588 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02001589 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02001590
Paul Turnerf1b17282012-10-04 13:18:31 +02001591 /*
 1592	 * For a group entity we need to use its owned cfs_rq_clock_task() in
 1593	 * case it is the parent of a throttled hierarchy.
1594 */
1595 if (entity_is_task(se))
1596 now = cfs_rq_clock_task(cfs_rq);
1597 else
1598 now = cfs_rq_clock_task(group_cfs_rq(se));
1599
1600 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02001601 return;
1602
1603 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02001604
1605 if (!update_cfs_rq)
1606 return;
1607
Paul Turner2dac7542012-10-04 13:18:30 +02001608 if (se->on_rq)
1609 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02001610 else
1611 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1612}
1613
1614/*
1615 * Decay the load contributed by all blocked children and account this so that
 1616	 * their contribution may be appropriately discounted when they wake up.
1617 */
Paul Turneraff3e492012-10-04 13:18:30 +02001618static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001619{
Paul Turnerf1b17282012-10-04 13:18:31 +02001620 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001621 u64 decays;
1622
1623 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02001624 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001625 return;
1626
Alex Shi25099402013-06-20 10:18:55 +08001627 if (atomic_long_read(&cfs_rq->removed_load)) {
1628 unsigned long removed_load;
1629 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
Paul Turneraff3e492012-10-04 13:18:30 +02001630 subtract_blocked_load_contrib(cfs_rq, removed_load);
1631 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001632
Paul Turneraff3e492012-10-04 13:18:30 +02001633 if (decays) {
1634 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1635 decays);
1636 atomic64_add(decays, &cfs_rq->decay_counter);
1637 cfs_rq->last_decay = now;
1638 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02001639
1640 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02001641}
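
/*
 * Note on units (informal): cfs_rq_clock_task() is in nanoseconds, so the
 * >> 20 above counts elapsed ~1ms (2^20 ns) intervals, the same period
 * length used by __update_entity_runnable_avg(); each such interval is one
 * application of y to the blocked load.
 */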
Ben Segall18bf2802012-10-04 12:51:20 +02001642
1643static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1644{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001645 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02001646 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02001647}
Paul Turner2dac7542012-10-04 13:18:30 +02001648
1649/* Add the load generated by se into cfs_rq's child load-average */
1650static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001651 struct sched_entity *se,
1652 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02001653{
Paul Turneraff3e492012-10-04 13:18:30 +02001654 /*
 1655	 * We track migrations using entity decay_count <= 0; on a wake-up
1656 * migration we use a negative decay count to track the remote decays
1657 * accumulated while sleeping.
Alex Shia75cdaa2013-06-20 10:18:47 +08001658 *
1659 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
1660 * are seen by enqueue_entity_load_avg() as a migration with an already
1661 * constructed load_avg_contrib.
Paul Turneraff3e492012-10-04 13:18:30 +02001662 */
1663 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001664 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02001665 if (se->avg.decay_count) {
1666 /*
1667 * In a wake-up migration we have to approximate the
1668 * time sleeping. This is because we can't synchronize
1669 * clock_task between the two cpus, and it is not
1670 * guaranteed to be read-safe. Instead, we can
1671 * approximate this using our carried decays, which are
1672 * explicitly atomically readable.
1673 */
1674 se->avg.last_runnable_update -= (-se->avg.decay_count)
1675 << 20;
1676 update_entity_load_avg(se, 0);
1677 /* Indicate that we're now synchronized and on-rq */
1678 se->avg.decay_count = 0;
1679 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001680 wakeup = 0;
1681 } else {
Alex Shi282cf492013-06-20 10:18:48 +08001682 /*
1683 * Task re-woke on same cpu (or else migrate_task_rq_fair()
1684 * would have made count negative); we must be careful to avoid
1685 * double-accounting blocked time after synchronizing decays.
1686 */
1687 se->avg.last_runnable_update += __synchronize_entity_decay(se)
1688 << 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001689 }
1690
Paul Turneraff3e492012-10-04 13:18:30 +02001691 /* migrated tasks did not contribute to our blocked load */
1692 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02001693 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02001694 update_entity_load_avg(se, 0);
1695 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001696
Paul Turner2dac7542012-10-04 13:18:30 +02001697 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02001698 /* we force update consideration on load-balancer moves */
1699 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02001700}
1701
Paul Turner9ee474f2012-10-04 13:18:30 +02001702/*
 1703	 * Remove se's load from this cfs_rq child load-average; if the entity is
1704 * transitioning to a blocked state we track its projected decay using
1705 * blocked_load_avg.
1706 */
Paul Turner2dac7542012-10-04 13:18:30 +02001707static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001708 struct sched_entity *se,
1709 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02001710{
Paul Turner9ee474f2012-10-04 13:18:30 +02001711 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02001712 /* we force update consideration on load-balancer moves */
1713 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02001714
Paul Turner2dac7542012-10-04 13:18:30 +02001715 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02001716 if (sleep) {
1717 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1718 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1719 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02001720}
Vincent Guittot642dbc32013-04-18 18:34:26 +02001721
1722/*
1723 * Update the rq's load with the elapsed running time before entering
 1724	 * idle. If the last scheduled task is not a CFS task, idle_enter will
1725 * be the only way to update the runnable statistic.
1726 */
1727void idle_enter_fair(struct rq *this_rq)
1728{
1729 update_rq_runnable_avg(this_rq, 1);
1730}
1731
1732/*
1733 * Update the rq's load with the elapsed idle time before a task is
 1734	 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
1735 * be the only way to update the runnable statistic.
1736 */
1737void idle_exit_fair(struct rq *this_rq)
1738{
1739 update_rq_runnable_avg(this_rq, 0);
1740}
1741
Paul Turner9d85f212012-10-04 13:18:29 +02001742#else
Paul Turner9ee474f2012-10-04 13:18:30 +02001743static inline void update_entity_load_avg(struct sched_entity *se,
1744 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02001745static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001746static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001747 struct sched_entity *se,
1748 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001749static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001750 struct sched_entity *se,
1751 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02001752static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1753 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02001754#endif
1755
Ingo Molnar2396af62007-08-09 11:16:48 +02001756static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001757{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001758#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02001759 struct task_struct *tsk = NULL;
1760
1761 if (entity_is_task(se))
1762 tsk = task_of(se);
1763
Lucas De Marchi41acab82010-03-10 23:37:45 -03001764 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001765 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001766
1767 if ((s64)delta < 0)
1768 delta = 0;
1769
Lucas De Marchi41acab82010-03-10 23:37:45 -03001770 if (unlikely(delta > se->statistics.sleep_max))
1771 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001772
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001773 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001774 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01001775
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001776 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02001777 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001778 trace_sched_stat_sleep(tsk, delta);
1779 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001780 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03001781 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001782 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001783
1784 if ((s64)delta < 0)
1785 delta = 0;
1786
Lucas De Marchi41acab82010-03-10 23:37:45 -03001787 if (unlikely(delta > se->statistics.block_max))
1788 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001789
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001790 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001791 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02001792
Peter Zijlstrae4143142009-07-23 20:13:26 +02001793 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001794 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001795 se->statistics.iowait_sum += delta;
1796 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001797 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001798 }
1799
Andrew Vaginb781a602011-11-28 12:03:35 +03001800 trace_sched_stat_blocked(tsk, delta);
1801
Peter Zijlstrae4143142009-07-23 20:13:26 +02001802 /*
1803 * Blocking time is in units of nanosecs, so shift by
1804 * 20 to get a milliseconds-range estimation of the
1805 * amount of time that the task spent sleeping:
1806 */
1807 if (unlikely(prof_on == SLEEP_PROFILING)) {
1808 profile_hits(SLEEP_PROFILING,
1809 (void *)get_wchan(tsk),
1810 delta >> 20);
1811 }
1812 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02001813 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001814 }
1815#endif
1816}
1817
Peter Zijlstraddc97292007-10-15 17:00:10 +02001818static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1819{
1820#ifdef CONFIG_SCHED_DEBUG
1821 s64 d = se->vruntime - cfs_rq->min_vruntime;
1822
1823 if (d < 0)
1824 d = -d;
1825
1826 if (d > 3*sysctl_sched_latency)
1827 schedstat_inc(cfs_rq, nr_spread_over);
1828#endif
1829}
1830
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001831static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001832place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1833{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02001834 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001835
Peter Zijlstra2cb86002007-11-09 22:39:37 +01001836 /*
1837 * The 'current' period is already promised to the current tasks,
1838 * however the extra weight of the new task will slow them down a
 1839	 * little; place the new task so that it fits in the slot that
1840 * stays open at the end.
1841 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001842 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02001843 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001844
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001845 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01001846 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001847 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001848
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001849 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001850 * Halve their sleep time's effect, to allow
1851 * for a gentler effect of sleepers:
1852 */
1853 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1854 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02001855
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001856 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001857 }
1858
Mike Galbraithb5d9d732009-09-08 11:12:28 +02001859 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05301860 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001861}
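
/*
 * Rough numbers (a sketch using the defaults): with sched_latency at 6ms
 * and GENTLE_FAIR_SLEEPERS enabled, a waking task is placed at most ~3ms
 * of vruntime behind min_vruntime, while a newly forked task under
 * START_DEBIT starts one vslice ahead of it; the max_vruntime() above
 * guarantees an entity is never moved to a smaller vruntime than it
 * already had.
 */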
1862
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001863static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1864
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001865static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001866enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001867{
1868 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001869 * Update the normalized vruntime before updating min_vruntime
Kamalesh Babulal0fc576d2013-06-27 11:24:18 +05301870 * through calling update_curr().
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001871 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001872 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001873 se->vruntime += cfs_rq->min_vruntime;
1874
1875 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001876 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001877 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001878 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02001879 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001880 account_entity_enqueue(cfs_rq, se);
1881 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001882
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001883 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001884 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02001885 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02001886 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001887
Ingo Molnard2417e52007-08-09 11:16:47 +02001888 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02001889 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001890 if (se != cfs_rq->curr)
1891 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001892 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001893
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001894 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001895 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001896 check_enqueue_throttle(cfs_rq);
1897 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001898}
1899
Rik van Riel2c13c9192011-02-01 09:48:37 -05001900static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01001901{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001902 for_each_sched_entity(se) {
1903 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1904 if (cfs_rq->last == se)
1905 cfs_rq->last = NULL;
1906 else
1907 break;
1908 }
1909}
Peter Zijlstra2002c692008-11-11 11:52:33 +01001910
Rik van Riel2c13c9192011-02-01 09:48:37 -05001911static void __clear_buddies_next(struct sched_entity *se)
1912{
1913 for_each_sched_entity(se) {
1914 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1915 if (cfs_rq->next == se)
1916 cfs_rq->next = NULL;
1917 else
1918 break;
1919 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01001920}
1921
Rik van Rielac53db52011-02-01 09:51:03 -05001922static void __clear_buddies_skip(struct sched_entity *se)
1923{
1924 for_each_sched_entity(se) {
1925 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1926 if (cfs_rq->skip == se)
1927 cfs_rq->skip = NULL;
1928 else
1929 break;
1930 }
1931}
1932
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001933static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1934{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001935 if (cfs_rq->last == se)
1936 __clear_buddies_last(se);
1937
1938 if (cfs_rq->next == se)
1939 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05001940
1941 if (cfs_rq->skip == se)
1942 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001943}
1944
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07001945static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07001946
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001947static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001948dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001949{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001950 /*
1951 * Update run-time statistics of the 'current'.
1952 */
1953 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001954 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001955
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02001956 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001957 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001958#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001959 if (entity_is_task(se)) {
1960 struct task_struct *tsk = task_of(se);
1961
1962 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001963 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001964 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001965 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001966 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02001967#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001968 }
1969
Peter Zijlstra2002c692008-11-11 11:52:33 +01001970 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001971
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001972 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001973 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001974 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001975 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001976
1977 /*
1978 * Normalize the entity after updating the min_vruntime because the
1979 * update can refer to the ->curr item and we need to reflect this
1980 * movement in our normalized position.
1981 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001982 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001983 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07001984
Paul Turnerd8b49862011-07-21 09:43:41 -07001985 /* return excess runtime on last dequeue */
1986 return_cfs_rq_runtime(cfs_rq);
1987
Peter Zijlstra1e876232011-05-17 16:21:10 -07001988 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001989 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001990}
1991
1992/*
1993 * Preempt the current task with a newly woken task if needed:
1994 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02001995static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02001996check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001997{
Peter Zijlstra11697832007-09-05 14:32:49 +02001998 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001999 struct sched_entity *se;
2000 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02002001
Peter Zijlstra6d0f0eb2007-10-15 17:00:05 +02002002 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02002003 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002004 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002005 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002006 /*
2007 * The current task ran long enough, ensure it doesn't get
2008 * re-elected due to buddy favours.
2009 */
2010 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002011 return;
2012 }
2013
2014 /*
2015 * Ensure that a task that missed wakeup preemption by a
2016 * narrow margin doesn't have to wait for a full slice.
2017 * This also mitigates buddy induced latencies under load.
2018 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002019 if (delta_exec < sysctl_sched_min_granularity)
2020 return;
2021
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002022 se = __pick_first_entity(cfs_rq);
2023 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02002024
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002025 if (delta < 0)
2026 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01002027
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002028 if (delta > ideal_runtime)
2029 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002030}
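
/*
 * Rough numbers with the defaults (illustrative): two equally weighted
 * nice-0 tasks on a runqueue each get a ~3ms ideal_runtime out of the
 * 6ms latency target, and the second, vruntime-based check only fires
 * once the current task has run for at least sysctl_sched_min_granularity
 * (0.75ms by default).
 */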
2031
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002032static void
Ingo Molnar8494f412007-08-09 11:16:48 +02002033set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002034{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002035 /* 'current' is not kept within the tree. */
2036 if (se->on_rq) {
2037 /*
2038 * Any task has to be enqueued before it get to execute on
2039 * a CPU. So account for the time it spent waiting on the
2040 * runqueue.
2041 */
2042 update_stats_wait_end(cfs_rq, se);
2043 __dequeue_entity(cfs_rq, se);
2044 }
2045
Ingo Molnar79303e92007-08-09 11:16:47 +02002046 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02002047 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02002048#ifdef CONFIG_SCHEDSTATS
2049 /*
2050 * Track our maximum slice length, if the CPU's load is at
 2051	 * least twice that of our own weight (i.e. don't track it
2052 * when there are only lesser-weight tasks around):
2053 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02002054 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002055 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02002056 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2057 }
2058#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02002059 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002060}
2061
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02002062static int
2063wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2064
Rik van Rielac53db52011-02-01 09:51:03 -05002065/*
2066 * Pick the next process, keeping these things in mind, in this order:
2067 * 1) keep things fair between processes/task groups
2068 * 2) pick the "next" process, since someone really wants that to run
2069 * 3) pick the "last" process, for cache locality
2070 * 4) do not run the "skip" process, if something else is available
2071 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002072static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002073{
Rik van Rielac53db52011-02-01 09:51:03 -05002074 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002075 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002076
Rik van Rielac53db52011-02-01 09:51:03 -05002077 /*
2078 * Avoid running the skip buddy, if running something else can
2079 * be done without getting too unfair.
2080 */
2081 if (cfs_rq->skip == se) {
2082 struct sched_entity *second = __pick_next_entity(se);
2083 if (second && wakeup_preempt_entity(second, left) < 1)
2084 se = second;
2085 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002086
Mike Galbraithf685cea2009-10-23 23:09:22 +02002087 /*
2088 * Prefer last buddy, try to return the CPU to a preempted task.
2089 */
2090 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2091 se = cfs_rq->last;
2092
Rik van Rielac53db52011-02-01 09:51:03 -05002093 /*
2094 * Someone really wants this to run. If it's not unfair, run it.
2095 */
2096 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2097 se = cfs_rq->next;
2098
Mike Galbraithf685cea2009-10-23 23:09:22 +02002099 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002100
2101 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002102}
2103
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002104static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2105
Ingo Molnarab6cde22007-08-09 11:16:48 +02002106static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002107{
2108 /*
2109 * If still on the runqueue then deactivate_task()
2110 * was not called and update_curr() has to be done:
2111 */
2112 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002113 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002114
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002115 /* throttle cfs_rqs exceeding runtime */
2116 check_cfs_rq_runtime(cfs_rq);
2117
Peter Zijlstraddc97292007-10-15 17:00:10 +02002118 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002119 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02002120 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002121 /* Put 'current' back into the tree. */
2122 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02002123 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02002124 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002125 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02002126 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002127}
2128
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002129static void
2130entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002131{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002132 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002133 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002134 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002135 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002136
Paul Turner43365bd2010-12-15 19:10:17 -08002137 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002138 * Ensure that runnable average is periodically updated.
2139 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002140 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002141 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02002142 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02002143
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002144#ifdef CONFIG_SCHED_HRTICK
2145 /*
2146 * queued ticks are scheduled to match the slice, so don't bother
2147 * validating it and just reschedule.
2148 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002149 if (queued) {
2150 resched_task(rq_of(cfs_rq)->curr);
2151 return;
2152 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002153 /*
2154 * don't let the period tick interfere with the hrtick preemption
2155 */
2156 if (!sched_feat(DOUBLE_TICK) &&
2157 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2158 return;
2159#endif
2160
Yong Zhang2c2efae2011-07-29 16:20:33 +08002161 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002162 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002163}
2164
Paul Turnerab84d312011-07-21 09:43:28 -07002165
2166/**************************************************
2167 * CFS bandwidth control machinery
2168 */
2169
2170#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002171
2172#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002173static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002174
2175static inline bool cfs_bandwidth_used(void)
2176{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002177 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002178}
2179
2180void account_cfs_bandwidth_used(int enabled, int was_enabled)
2181{
2182 /* only need to count groups transitioning between enabled/!enabled */
2183 if (enabled && !was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002184 static_key_slow_inc(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002185 else if (!enabled && was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002186 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002187}
2188#else /* HAVE_JUMP_LABEL */
2189static bool cfs_bandwidth_used(void)
2190{
2191 return true;
2192}
2193
2194void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2195#endif /* HAVE_JUMP_LABEL */
2196
Paul Turnerab84d312011-07-21 09:43:28 -07002197/*
2198 * default period for cfs group bandwidth.
2199 * default: 0.1s, units: nanoseconds
2200 */
2201static inline u64 default_cfs_period(void)
2202{
2203 return 100000000ULL;
2204}
Paul Turnerec12cb72011-07-21 09:43:30 -07002205
2206static inline u64 sched_cfs_bandwidth_slice(void)
2207{
2208 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2209}
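
/*
 * Example of the parameters (a sketch using the defaults): a group with
 * quota = 25ms per period = 100ms is limited to roughly 25% of one CPU.
 * Each cfs_rq pulls runtime from the global pool in slices of
 * sysctl_sched_cfs_bandwidth_slice (5ms by default), which amortizes the
 * cost of taking cfs_b->lock.
 */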
2210
Paul Turnera9cf55b2011-07-21 09:43:32 -07002211/*
2212 * Replenish runtime according to assigned quota and update expiration time.
2213 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2214 * additional synchronization around rq->lock.
2215 *
2216 * requires cfs_b->lock
2217 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002218void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002219{
2220 u64 now;
2221
2222 if (cfs_b->quota == RUNTIME_INF)
2223 return;
2224
2225 now = sched_clock_cpu(smp_processor_id());
2226 cfs_b->runtime = cfs_b->quota;
2227 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2228}
2229
Peter Zijlstra029632f2011-10-25 10:00:11 +02002230static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2231{
2232 return &tg->cfs_bandwidth;
2233}
2234
Paul Turnerf1b17282012-10-04 13:18:31 +02002235/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2236static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2237{
2238 if (unlikely(cfs_rq->throttle_count))
2239 return cfs_rq->throttled_clock_task;
2240
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002241 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002242}
2243
Paul Turner85dac902011-07-21 09:43:33 -07002244/* returns 0 on failure to allocate runtime */
2245static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002246{
2247 struct task_group *tg = cfs_rq->tg;
2248 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002249 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002250
2251 /* note: this is a positive sum as runtime_remaining <= 0 */
2252 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2253
2254 raw_spin_lock(&cfs_b->lock);
2255 if (cfs_b->quota == RUNTIME_INF)
2256 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002257 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002258 /*
2259 * If the bandwidth pool has become inactive, then at least one
2260 * period must have elapsed since the last consumption.
2261 * Refresh the global state and ensure bandwidth timer becomes
2262 * active.
2263 */
2264 if (!cfs_b->timer_active) {
2265 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002266 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002267 }
Paul Turner58088ad2011-07-21 09:43:31 -07002268
2269 if (cfs_b->runtime > 0) {
2270 amount = min(cfs_b->runtime, min_amount);
2271 cfs_b->runtime -= amount;
2272 cfs_b->idle = 0;
2273 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002274 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002275 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002276 raw_spin_unlock(&cfs_b->lock);
2277
2278 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002279 /*
2280 * we may have advanced our local expiration to account for allowed
2281 * spread between our sched_clock and the one on which runtime was
2282 * issued.
2283 */
2284 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2285 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002286
2287 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002288}
2289
2290/*
2291 * Note: This depends on the synchronization provided by sched_clock and the
2292 * fact that rq->clock snapshots this value.
2293 */
2294static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2295{
2296 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002297
2298 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002299 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002300 return;
2301
2302 if (cfs_rq->runtime_remaining < 0)
2303 return;
2304
2305 /*
2306 * If the local deadline has passed we have to consider the
2307 * possibility that our sched_clock is 'fast' and the global deadline
2308 * has not truly expired.
2309 *
 2310	 * Fortunately we can determine whether this is the case by checking
2311 * whether the global deadline has advanced.
2312 */
2313
2314 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2315 /* extend local deadline, drift is bounded above by 2 ticks */
2316 cfs_rq->runtime_expires += TICK_NSEC;
2317 } else {
2318 /* global deadline is ahead, expiration has passed */
2319 cfs_rq->runtime_remaining = 0;
2320 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002321}
2322
2323static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2324 unsigned long delta_exec)
2325{
Paul Turnera9cf55b2011-07-21 09:43:32 -07002326 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07002327 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002328 expire_cfs_rq_runtime(cfs_rq);
2329
2330 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07002331 return;
2332
Paul Turner85dac902011-07-21 09:43:33 -07002333 /*
2334 * if we're unable to extend our runtime we resched so that the active
2335 * hierarchy can be throttled
2336 */
2337 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2338 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07002339}
2340
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002341static __always_inline
2342void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07002343{
Paul Turner56f570e2011-11-07 20:26:33 -08002344 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07002345 return;
2346
2347 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2348}
2349
Paul Turner85dac902011-07-21 09:43:33 -07002350static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2351{
Paul Turner56f570e2011-11-07 20:26:33 -08002352 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07002353}
2354
Paul Turner64660c82011-07-21 09:43:36 -07002355/* check whether cfs_rq, or any parent, is throttled */
2356static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2357{
Paul Turner56f570e2011-11-07 20:26:33 -08002358 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07002359}
2360
2361/*
2362 * Ensure that neither of the group entities corresponding to src_cpu or
2363 * dest_cpu are members of a throttled hierarchy when performing group
2364 * load-balance operations.
2365 */
2366static inline int throttled_lb_pair(struct task_group *tg,
2367 int src_cpu, int dest_cpu)
2368{
2369 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2370
2371 src_cfs_rq = tg->cfs_rq[src_cpu];
2372 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2373
2374 return throttled_hierarchy(src_cfs_rq) ||
2375 throttled_hierarchy(dest_cfs_rq);
2376}
2377
2378/* updated child weight may affect parent so we have to do this bottom up */
2379static int tg_unthrottle_up(struct task_group *tg, void *data)
2380{
2381 struct rq *rq = data;
2382 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2383
2384 cfs_rq->throttle_count--;
2385#ifdef CONFIG_SMP
2386 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02002387 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002388 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02002389 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07002390 }
2391#endif
2392
2393 return 0;
2394}
2395
2396static int tg_throttle_down(struct task_group *tg, void *data)
2397{
2398 struct rq *rq = data;
2399 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2400
Paul Turner82958362012-10-04 13:18:31 +02002401 /* group is entering throttled state, stop time */
2402 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002403 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07002404 cfs_rq->throttle_count++;
2405
2406 return 0;
2407}
2408
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002409static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07002410{
2411 struct rq *rq = rq_of(cfs_rq);
2412 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2413 struct sched_entity *se;
2414 long task_delta, dequeue = 1;
2415
2416 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2417
Paul Turnerf1b17282012-10-04 13:18:31 +02002418 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07002419 rcu_read_lock();
2420 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2421 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07002422
2423 task_delta = cfs_rq->h_nr_running;
2424 for_each_sched_entity(se) {
2425 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2426 /* throttled entity or throttle-on-deactivate */
2427 if (!se->on_rq)
2428 break;
2429
2430 if (dequeue)
2431 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2432 qcfs_rq->h_nr_running -= task_delta;
2433
2434 if (qcfs_rq->load.weight)
2435 dequeue = 0;
2436 }
2437
2438 if (!se)
2439 rq->nr_running -= task_delta;
2440
2441 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002442 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07002443 raw_spin_lock(&cfs_b->lock);
2444 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2445 raw_spin_unlock(&cfs_b->lock);
2446}
2447
Peter Zijlstra029632f2011-10-25 10:00:11 +02002448void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07002449{
2450 struct rq *rq = rq_of(cfs_rq);
2451 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2452 struct sched_entity *se;
2453 int enqueue = 1;
2454 long task_delta;
2455
Michael Wang22b958d2013-06-04 14:23:39 +08002456 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07002457
2458 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02002459
2460 update_rq_clock(rq);
2461
Paul Turner671fd9d2011-07-21 09:43:34 -07002462 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002463 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07002464 list_del_rcu(&cfs_rq->throttled_list);
2465 raw_spin_unlock(&cfs_b->lock);
2466
Paul Turner64660c82011-07-21 09:43:36 -07002467 /* update hierarchical throttle state */
2468 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2469
Paul Turner671fd9d2011-07-21 09:43:34 -07002470 if (!cfs_rq->load.weight)
2471 return;
2472
2473 task_delta = cfs_rq->h_nr_running;
2474 for_each_sched_entity(se) {
2475 if (se->on_rq)
2476 enqueue = 0;
2477
2478 cfs_rq = cfs_rq_of(se);
2479 if (enqueue)
2480 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2481 cfs_rq->h_nr_running += task_delta;
2482
2483 if (cfs_rq_throttled(cfs_rq))
2484 break;
2485 }
2486
2487 if (!se)
2488 rq->nr_running += task_delta;
2489
2490 /* determine whether we need to wake up potentially idle cpu */
2491 if (rq->curr == rq->idle && rq->cfs.nr_running)
2492 resched_task(rq->curr);
2493}
2494
2495static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2496 u64 remaining, u64 expires)
2497{
2498 struct cfs_rq *cfs_rq;
2499 u64 runtime = remaining;
2500
2501 rcu_read_lock();
2502 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2503 throttled_list) {
2504 struct rq *rq = rq_of(cfs_rq);
2505
2506 raw_spin_lock(&rq->lock);
2507 if (!cfs_rq_throttled(cfs_rq))
2508 goto next;
2509
2510 runtime = -cfs_rq->runtime_remaining + 1;
2511 if (runtime > remaining)
2512 runtime = remaining;
2513 remaining -= runtime;
2514
2515 cfs_rq->runtime_remaining += runtime;
2516 cfs_rq->runtime_expires = expires;
2517
2518 /* we check whether we're throttled above */
2519 if (cfs_rq->runtime_remaining > 0)
2520 unthrottle_cfs_rq(cfs_rq);
2521
2522next:
2523 raw_spin_unlock(&rq->lock);
2524
2525 if (!remaining)
2526 break;
2527 }
2528 rcu_read_unlock();
2529
2530 return remaining;
2531}
2532
Paul Turner58088ad2011-07-21 09:43:31 -07002533/*
2534 * Responsible for refilling a task_group's bandwidth and unthrottling its
2535 * cfs_rqs as appropriate. If there has been no activity within the last
2536 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2537 * used to track this state.
2538 */
2539static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2540{
Paul Turner671fd9d2011-07-21 09:43:34 -07002541 u64 runtime, runtime_expires;
2542 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07002543
2544 raw_spin_lock(&cfs_b->lock);
2545 /* no need to continue the timer with no bandwidth constraint */
2546 if (cfs_b->quota == RUNTIME_INF)
2547 goto out_unlock;
2548
Paul Turner671fd9d2011-07-21 09:43:34 -07002549 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2550 /* idle depends on !throttled (for the case of a large deficit) */
2551 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002552 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07002553
Paul Turnera9cf55b2011-07-21 09:43:32 -07002554 /* if we're going inactive then everything else can be deferred */
2555 if (idle)
2556 goto out_unlock;
2557
2558 __refill_cfs_bandwidth_runtime(cfs_b);
2559
Paul Turner671fd9d2011-07-21 09:43:34 -07002560 if (!throttled) {
2561 /* mark as potentially idle for the upcoming period */
2562 cfs_b->idle = 1;
2563 goto out_unlock;
2564 }
Paul Turner58088ad2011-07-21 09:43:31 -07002565
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002566 /* account preceding periods in which throttling occurred */
2567 cfs_b->nr_throttled += overrun;
2568
Paul Turner671fd9d2011-07-21 09:43:34 -07002569 /*
2570 * There are throttled entities so we must first use the new bandwidth
2571 * to unthrottle them before making it generally available. This
2572 * ensures that all existing debts will be paid before a new cfs_rq is
2573 * allowed to run.
2574 */
2575 runtime = cfs_b->runtime;
2576 runtime_expires = cfs_b->runtime_expires;
2577 cfs_b->runtime = 0;
2578
2579 /*
2580 * This check is repeated as we are holding onto the new bandwidth
2581 * while we unthrottle. This can potentially race with an unthrottled
2582 * group trying to acquire new bandwidth from the global pool.
2583 */
2584 while (throttled && runtime > 0) {
2585 raw_spin_unlock(&cfs_b->lock);
2586 /* we can't nest cfs_b->lock while distributing bandwidth */
2587 runtime = distribute_cfs_runtime(cfs_b, runtime,
2588 runtime_expires);
2589 raw_spin_lock(&cfs_b->lock);
2590
2591 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2592 }
2593
2594 /* return (any) remaining runtime */
2595 cfs_b->runtime = runtime;
2596 /*
2597 * While we are ensured activity in the period following an
2598 * unthrottle, this also covers the case in which the new bandwidth is
2599 * insufficient to cover the existing bandwidth deficit. (Forcing the
2600 * timer to remain active while there are any throttled entities.)
2601 */
2602 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07002603out_unlock:
2604 if (idle)
2605 cfs_b->timer_active = 0;
2606 raw_spin_unlock(&cfs_b->lock);
2607
2608 return idle;
2609}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002610
Paul Turnerd8b49862011-07-21 09:43:41 -07002611/* a cfs_rq won't donate quota below this amount */
2612static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2613/* minimum remaining period time to redistribute slack quota */
2614static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2615/* how long we wait to gather additional slack before distributing */
2616static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2617
2618/* are we near the end of the current quota period? */
2619static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2620{
2621 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2622 u64 remaining;
2623
2624 /* if the call-back is running a quota refresh is already occurring */
2625 if (hrtimer_callback_running(refresh_timer))
2626 return 1;
2627
2628 /* is a quota refresh about to occur? */
2629 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2630 if (remaining < min_expire)
2631 return 1;
2632
2633 return 0;
2634}
2635
2636static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2637{
2638 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2639
2640 /* if there's a quota refresh soon don't bother with slack */
2641 if (runtime_refresh_within(cfs_b, min_left))
2642 return;
2643
2644 start_bandwidth_timer(&cfs_b->slack_timer,
2645 ns_to_ktime(cfs_bandwidth_slack_period));
2646}
2647
2648/* we know any runtime found here is valid as update_curr() precedes return */
2649static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2650{
2651 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2652 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2653
2654 if (slack_runtime <= 0)
2655 return;
2656
2657 raw_spin_lock(&cfs_b->lock);
2658 if (cfs_b->quota != RUNTIME_INF &&
2659 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2660 cfs_b->runtime += slack_runtime;
2661
2662 /* we are under rq->lock, defer unthrottling using a timer */
2663 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2664 !list_empty(&cfs_b->throttled_cfs_rq))
2665 start_cfs_slack_bandwidth(cfs_b);
2666 }
2667 raw_spin_unlock(&cfs_b->lock);
2668
2669 /* even if it's not valid for return we don't want to try again */
2670 cfs_rq->runtime_remaining -= slack_runtime;
2671}
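/*
 * For illustration: a cfs_rq that dequeues its last task while holding 3ms
 * of runtime_remaining donates 3ms - 1ms = 2ms back to the global pool
 * (keeping min_cfs_rq_runtime locally), but only while its runtime_expires
 * still matches the pool's; runtime left over from an earlier period is
 * simply subtracted from runtime_remaining without being returned.
 */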
2672
2673static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2674{
Paul Turner56f570e2011-11-07 20:26:33 -08002675 if (!cfs_bandwidth_used())
2676 return;
2677
Paul Turnerfccfdc62011-11-07 20:26:34 -08002678 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07002679 return;
2680
2681 __return_cfs_rq_runtime(cfs_rq);
2682}
2683
2684/*
2685 * This is done with a timer (instead of inline with bandwidth return) since
2686 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2687 */
2688static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2689{
2690 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2691 u64 expires;
2692
2693 /* confirm we're still not at a refresh boundary */
2694 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2695 return;
2696
2697 raw_spin_lock(&cfs_b->lock);
2698 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2699 runtime = cfs_b->runtime;
2700 cfs_b->runtime = 0;
2701 }
2702 expires = cfs_b->runtime_expires;
2703 raw_spin_unlock(&cfs_b->lock);
2704
2705 if (!runtime)
2706 return;
2707
2708 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2709
2710 raw_spin_lock(&cfs_b->lock);
2711 if (expires == cfs_b->runtime_expires)
2712 cfs_b->runtime = runtime;
2713 raw_spin_unlock(&cfs_b->lock);
2714}
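/*
 * For illustration: with the default 5ms bandwidth slice, the slack timer
 * only redistributes once more than one slice worth of runtime has been
 * returned to the global pool, and the remainder left after
 * distribute_cfs_runtime() is written back only if the quota period did
 * not roll over while cfs_b->lock was dropped (the expires re-check above).
 */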
2715
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002716/*
2717 * When a group wakes up we want to make sure that its quota is not already
2718 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2719 * runtime as update_curr() throttling can not not trigger until it's on-rq.
2720 */
2721static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2722{
Paul Turner56f570e2011-11-07 20:26:33 -08002723 if (!cfs_bandwidth_used())
2724 return;
2725
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002726 /* an active group must be handled by the update_curr()->put() path */
2727 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2728 return;
2729
2730 /* ensure the group is not already throttled */
2731 if (cfs_rq_throttled(cfs_rq))
2732 return;
2733
2734 /* update runtime allocation */
2735 account_cfs_rq_runtime(cfs_rq, 0);
2736 if (cfs_rq->runtime_remaining <= 0)
2737 throttle_cfs_rq(cfs_rq);
2738}
2739
2740/* conditionally throttle active cfs_rq's from put_prev_entity() */
2741static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2742{
Paul Turner56f570e2011-11-07 20:26:33 -08002743 if (!cfs_bandwidth_used())
2744 return;
2745
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002746 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2747 return;
2748
2749 /*
 2750 * It's possible for a throttled entity to be forced into a running
 2751 * state (e.g. set_curr_task); in this case we're finished.
2752 */
2753 if (cfs_rq_throttled(cfs_rq))
2754 return;
2755
2756 throttle_cfs_rq(cfs_rq);
2757}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002758
Peter Zijlstra029632f2011-10-25 10:00:11 +02002759static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2760{
2761 struct cfs_bandwidth *cfs_b =
2762 container_of(timer, struct cfs_bandwidth, slack_timer);
2763 do_sched_cfs_slack_timer(cfs_b);
2764
2765 return HRTIMER_NORESTART;
2766}
2767
2768static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2769{
2770 struct cfs_bandwidth *cfs_b =
2771 container_of(timer, struct cfs_bandwidth, period_timer);
2772 ktime_t now;
2773 int overrun;
2774 int idle = 0;
2775
2776 for (;;) {
2777 now = hrtimer_cb_get_time(timer);
2778 overrun = hrtimer_forward(timer, now, cfs_b->period);
2779
2780 if (!overrun)
2781 break;
2782
2783 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2784 }
2785
2786 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2787}
2788
2789void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2790{
2791 raw_spin_lock_init(&cfs_b->lock);
2792 cfs_b->runtime = 0;
2793 cfs_b->quota = RUNTIME_INF;
2794 cfs_b->period = ns_to_ktime(default_cfs_period());
2795
2796 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2797 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2798 cfs_b->period_timer.function = sched_cfs_period_timer;
2799 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2800 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2801}
2802
2803static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2804{
2805 cfs_rq->runtime_enabled = 0;
2806 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2807}
2808
2809/* requires cfs_b->lock, may release to reprogram timer */
2810void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2811{
2812 /*
2813 * The timer may be active because we're trying to set a new bandwidth
2814 * period or because we're racing with the tear-down path
2815 * (timer_active==0 becomes visible before the hrtimer call-back
 2816 * terminates). In either case we ensure that it's re-programmed.
2817 */
2818 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2819 raw_spin_unlock(&cfs_b->lock);
2820 /* ensure cfs_b->lock is available while we wait */
2821 hrtimer_cancel(&cfs_b->period_timer);
2822
2823 raw_spin_lock(&cfs_b->lock);
2824 /* if someone else restarted the timer then we're done */
2825 if (cfs_b->timer_active)
2826 return;
2827 }
2828
2829 cfs_b->timer_active = 1;
2830 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2831}
2832
2833static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2834{
2835 hrtimer_cancel(&cfs_b->period_timer);
2836 hrtimer_cancel(&cfs_b->slack_timer);
2837}
2838
Arnd Bergmann38dc3342013-01-25 14:14:22 +00002839static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02002840{
2841 struct cfs_rq *cfs_rq;
2842
2843 for_each_leaf_cfs_rq(rq, cfs_rq) {
2844 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2845
2846 if (!cfs_rq->runtime_enabled)
2847 continue;
2848
2849 /*
2850 * clock_task is not advancing so we just need to make sure
2851 * there's some valid quota amount
2852 */
2853 cfs_rq->runtime_remaining = cfs_b->quota;
2854 if (cfs_rq_throttled(cfs_rq))
2855 unthrottle_cfs_rq(cfs_rq);
2856 }
2857}
2858
2859#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02002860static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2861{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002862 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02002863}
2864
2865static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2866 unsigned long delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002867static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2868static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002869static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07002870
2871static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2872{
2873 return 0;
2874}
Paul Turner64660c82011-07-21 09:43:36 -07002875
2876static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2877{
2878 return 0;
2879}
2880
2881static inline int throttled_lb_pair(struct task_group *tg,
2882 int src_cpu, int dest_cpu)
2883{
2884 return 0;
2885}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002886
2887void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2888
2889#ifdef CONFIG_FAIR_GROUP_SCHED
2890static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07002891#endif
2892
Peter Zijlstra029632f2011-10-25 10:00:11 +02002893static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2894{
2895 return NULL;
2896}
2897static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07002898static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002899
2900#endif /* CONFIG_CFS_BANDWIDTH */
2901
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002902/**************************************************
2903 * CFS operations on tasks:
2904 */
2905
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002906#ifdef CONFIG_SCHED_HRTICK
2907static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2908{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002909 struct sched_entity *se = &p->se;
2910 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2911
2912 WARN_ON(task_rq(p) != rq);
2913
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002914 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002915 u64 slice = sched_slice(cfs_rq, se);
2916 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2917 s64 delta = slice - ran;
2918
2919 if (delta < 0) {
2920 if (rq->curr == p)
2921 resched_task(p);
2922 return;
2923 }
2924
2925 /*
2926 * Don't schedule slices shorter than 10000ns, that just
2927 * doesn't make sense. Rely on vruntime for fairness.
2928 */
Peter Zijlstra31656512008-07-18 18:01:23 +02002929 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02002930 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002931
Peter Zijlstra31656512008-07-18 18:01:23 +02002932 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002933 }
2934}
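/*
 * For illustration: if sched_slice() works out to 4ms and the task has
 * already run 1.5ms of it, the hrtick is programmed to fire 2.5ms from
 * now; if the slice is already exhausted (delta < 0) and p is the task
 * currently running, it is resched'ed immediately instead. When the
 * request is for a task other than rq->curr, delta is clamped to at
 * least 10us (10000ns) as noted above.
 */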
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002935
2936/*
2937 * called from enqueue/dequeue and updates the hrtick when the
2938 * current task is from our class and nr_running is low enough
2939 * to matter.
2940 */
2941static void hrtick_update(struct rq *rq)
2942{
2943 struct task_struct *curr = rq->curr;
2944
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002945 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002946 return;
2947
2948 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2949 hrtick_start_fair(rq, curr);
2950}
Dhaval Giani55e12e52008-06-24 23:39:43 +05302951#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002952static inline void
2953hrtick_start_fair(struct rq *rq, struct task_struct *p)
2954{
2955}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002956
2957static inline void hrtick_update(struct rq *rq)
2958{
2959}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002960#endif
2961
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002962/*
2963 * The enqueue_task method is called before nr_running is
2964 * increased. Here we update the fair scheduling stats and
2965 * then put the task into the rbtree:
2966 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00002967static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002968enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002969{
2970 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002971 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002972
2973 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002974 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002975 break;
2976 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002977 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002978
2979 /*
2980 * end evaluation on encountering a throttled cfs_rq
2981 *
2982 * note: in the case of encountering a throttled cfs_rq we will
2983 * post the final h_nr_running increment below.
2984 */
2985 if (cfs_rq_throttled(cfs_rq))
2986 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002987 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07002988
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002989 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002990 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002991
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002992 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002993 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002994 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002995
Paul Turner85dac902011-07-21 09:43:33 -07002996 if (cfs_rq_throttled(cfs_rq))
2997 break;
2998
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002999 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003000 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003001 }
3002
Ben Segall18bf2802012-10-04 12:51:20 +02003003 if (!se) {
3004 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07003005 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003006 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003007 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003008}
3009
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003010static void set_next_buddy(struct sched_entity *se);
3011
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003012/*
3013 * The dequeue_task method is called before nr_running is
3014 * decreased. We remove the task from the rbtree and
3015 * update the fair scheduling stats:
3016 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003017static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003018{
3019 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003020 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003021 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003022
3023 for_each_sched_entity(se) {
3024 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003025 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003026
3027 /*
3028 * end evaluation on encountering a throttled cfs_rq
3029 *
3030 * note: in the case of encountering a throttled cfs_rq we will
3031 * post the final h_nr_running decrement below.
3032 */
3033 if (cfs_rq_throttled(cfs_rq))
3034 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003035 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003036
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003037 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003038 if (cfs_rq->load.weight) {
3039 /*
3040 * Bias pick_next to pick a task from this cfs_rq, as
3041 * p is sleeping when it is within its sched_slice.
3042 */
3043 if (task_sleep && parent_entity(se))
3044 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07003045
3046 /* avoid re-evaluating load for this entity */
3047 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003048 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003049 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003050 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003051 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003052
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003053 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003054 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003055 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003056
Paul Turner85dac902011-07-21 09:43:33 -07003057 if (cfs_rq_throttled(cfs_rq))
3058 break;
3059
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003060 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003061 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003062 }
3063
Ben Segall18bf2802012-10-04 12:51:20 +02003064 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07003065 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003066 update_rq_runnable_avg(rq, 1);
3067 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003068 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003069}
3070
Gregory Haskinse7693a32008-01-25 21:08:09 +01003071#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02003072/* Used instead of source_load when we know the type == 0 */
3073static unsigned long weighted_cpuload(const int cpu)
3074{
Alex Shib92486c2013-06-20 10:18:50 +08003075 return cpu_rq(cpu)->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003076}
3077
3078/*
3079 * Return a low guess at the load of a migration-source cpu weighted
3080 * according to the scheduling class and "nice" value.
3081 *
3082 * We want to under-estimate the load of migration sources, to
3083 * balance conservatively.
3084 */
3085static unsigned long source_load(int cpu, int type)
3086{
3087 struct rq *rq = cpu_rq(cpu);
3088 unsigned long total = weighted_cpuload(cpu);
3089
3090 if (type == 0 || !sched_feat(LB_BIAS))
3091 return total;
3092
3093 return min(rq->cpu_load[type-1], total);
3094}
3095
3096/*
3097 * Return a high guess at the load of a migration-target cpu weighted
3098 * according to the scheduling class and "nice" value.
3099 */
3100static unsigned long target_load(int cpu, int type)
3101{
3102 struct rq *rq = cpu_rq(cpu);
3103 unsigned long total = weighted_cpuload(cpu);
3104
3105 if (type == 0 || !sched_feat(LB_BIAS))
3106 return total;
3107
3108 return max(rq->cpu_load[type-1], total);
3109}
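/*
 * For illustration: if a cpu's instantaneous weighted_cpuload() is 2048
 * but its decayed rq->cpu_load[type-1] is 1024, source_load() reports the
 * smaller 1024 (optimistic about pulling work away) while target_load()
 * reports the larger 2048 (pessimistic about pushing work there); with
 * LB_BIAS disabled or type == 0 both report the instantaneous 2048.
 */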
3110
3111static unsigned long power_of(int cpu)
3112{
3113 return cpu_rq(cpu)->cpu_power;
3114}
3115
3116static unsigned long cpu_avg_load_per_task(int cpu)
3117{
3118 struct rq *rq = cpu_rq(cpu);
3119 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Alex Shib92486c2013-06-20 10:18:50 +08003120 unsigned long load_avg = rq->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003121
3122 if (nr_running)
Alex Shib92486c2013-06-20 10:18:50 +08003123 return load_avg / nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003124
3125 return 0;
3126}
3127
Michael Wang62470412013-07-04 12:55:51 +08003128static void record_wakee(struct task_struct *p)
3129{
3130 /*
 3131	 * Rough decay (wiping) for cost saving; don't worry about the
 3132	 * boundary, a really active task won't care about the small
 3133	 * loss of history.
3134 */
3135 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3136 current->wakee_flips = 0;
3137 current->wakee_flip_decay_ts = jiffies;
3138 }
3139
3140 if (current->last_wakee != p) {
3141 current->last_wakee = p;
3142 current->wakee_flips++;
3143 }
3144}
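/*
 * For illustration: a task that wakes a different partner on nearly every
 * wakeup accumulates wakee_flips quickly, while a strict 1:1 pair that
 * keeps waking the same partner stays near zero; the counter is wiped
 * roughly once a second (HZ jiffies), so only recent behaviour counts.
 */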
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003145
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003146static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003147{
3148 struct sched_entity *se = &p->se;
3149 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003150 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003151
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003152#ifndef CONFIG_64BIT
3153 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003154
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003155 do {
3156 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3157 smp_rmb();
3158 min_vruntime = cfs_rq->min_vruntime;
3159 } while (min_vruntime != min_vruntime_copy);
3160#else
3161 min_vruntime = cfs_rq->min_vruntime;
3162#endif
3163
3164 se->vruntime -= min_vruntime;
Michael Wang62470412013-07-04 12:55:51 +08003165 record_wakee(p);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003166}
3167
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003168#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003169/*
3170 * effective_load() calculates the load change as seen from the root_task_group
3171 *
3172 * Adding load to a group doesn't make a group heavier, but can cause movement
3173 * of group shares between cpus. Assuming the shares were perfectly aligned one
3174 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003175 *
3176 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3177 * on this @cpu and results in a total addition (subtraction) of @wg to the
3178 * total group weight.
3179 *
3180 * Given a runqueue weight distribution (rw_i) we can compute a shares
3181 * distribution (s_i) using:
3182 *
3183 * s_i = rw_i / \Sum rw_j (1)
3184 *
3185 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3186 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3187 * shares distribution (s_i):
3188 *
3189 * rw_i = { 2, 4, 1, 0 }
3190 * s_i = { 2/7, 4/7, 1/7, 0 }
3191 *
3192 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
 3193 * task used to run on and the CPU the waker is running on), so we need to
3194 * compute the effect of waking a task on either CPU and, in case of a sync
3195 * wakeup, compute the effect of the current task going to sleep.
3196 *
3197 * So for a change of @wl to the local @cpu with an overall group weight change
 3198 * of @wg we can compute the new shares distribution (s'_i) using:
3199 *
3200 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3201 *
3202 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3203 * differences in waking a task to CPU 0. The additional task changes the
3204 * weight and shares distributions like:
3205 *
3206 * rw'_i = { 3, 4, 1, 0 }
3207 * s'_i = { 3/8, 4/8, 1/8, 0 }
3208 *
3209 * We can then compute the difference in effective weight by using:
3210 *
3211 * dw_i = S * (s'_i - s_i) (3)
3212 *
3213 * Where 'S' is the group weight as seen by its parent.
3214 *
3215 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3216 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3217 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003218 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003219static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003220{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003221 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003222
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003223 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003224 return wl;
3225
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003226 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003227 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003228
Paul Turner977dda72011-01-14 17:57:50 -08003229 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003230
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003231 /*
3232 * W = @wg + \Sum rw_j
3233 */
3234 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003235
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003236 /*
3237 * w = rw_i + @wl
3238 */
3239 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003240
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003241 /*
3242 * wl = S * s'_i; see (2)
3243 */
3244 if (W > 0 && w < W)
3245 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003246 else
3247 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003248
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003249 /*
3250 * Per the above, wl is the new se->load.weight value; since
3251 * those are clipped to [MIN_SHARES, ...) do so now. See
3252 * calc_cfs_shares().
3253 */
Paul Turner977dda72011-01-14 17:57:50 -08003254 if (wl < MIN_SHARES)
3255 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003256
3257 /*
3258 * wl = dw_i = S * (s'_i - s_i); see (3)
3259 */
Paul Turner977dda72011-01-14 17:57:50 -08003260 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003261
3262 /*
3263 * Recursively apply this logic to all parent groups to compute
3264 * the final effective load change on the root group. Since
3265 * only the @tg group gets extra weight, all parent groups can
3266 * only redistribute existing shares. @wl is the shift in shares
3267 * resulting from this level per the above.
3268 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003269 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003270 }
3271
3272 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003273}
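/*
 * For illustration, continuing the rw_i = { 2, 4, 1, 0 } example from the
 * comment above for a single level, with tg->shares = S = 1024 and
 * wl = wg = 1 (one extra task, counting each task as one weight unit):
 *
 *   W  = wg + \Sum rw_j    = 1 + 7      = 8
 *   w  = rw_0 + wl         = 2 + 1      = 3
 *   wl = w * S / W         = 3*1024/8   = 384      (s'_0 = 3/8)
 *   wl -= se->load.weight  = 384 - 293  ~= +91     (s_0 = 2/7 ~= 293/1024)
 *
 * which matches the 5/56 of the group weight (5/56 * 1024 ~= 91) derived
 * above via (3).
 */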
3274#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003275
Peter Zijlstra83378262008-06-27 13:41:37 +02003276static inline unsigned long effective_load(struct task_group *tg, int cpu,
3277 unsigned long wl, unsigned long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003278{
Peter Zijlstra83378262008-06-27 13:41:37 +02003279 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003280}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003281
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003282#endif
3283
Michael Wang62470412013-07-04 12:55:51 +08003284static int wake_wide(struct task_struct *p)
3285{
Peter Zijlstra7d9ffa82013-07-04 12:56:46 +08003286 int factor = this_cpu_read(sd_llc_size);
Michael Wang62470412013-07-04 12:55:51 +08003287
3288 /*
 3289 * This is the wakee switching frequency: a high count can mean many
 3290 * wakees or rapid switching between them. Using the LLC size as the factor
 3291 * auto-adjusts how loose the criterion is, so a bigger node leads to more pull.
3292 */
3293 if (p->wakee_flips > factor) {
3294 /*
 3295 * The wakee is somewhat hot and needs a certain amount of cpu
 3296 * resource, so if the waker is far hotter still, prefer to leave
 3297 * the wakee alone.
3298 */
3299 if (current->wakee_flips > (factor * p->wakee_flips))
3300 return 1;
3301 }
3302
3303 return 0;
3304}
3305
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003306static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003307{
Paul Turnere37b6a72011-01-21 20:44:59 -08003308 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003309 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003310 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003311 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02003312 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003313 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003314
Michael Wang62470412013-07-04 12:55:51 +08003315 /*
 3316 * If we wake multiple tasks, be careful not to bounce
3317 * ourselves around too much.
3318 */
3319 if (wake_wide(p))
3320 return 0;
3321
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003322 idx = sd->wake_idx;
3323 this_cpu = smp_processor_id();
3324 prev_cpu = task_cpu(p);
3325 load = source_load(prev_cpu, idx);
3326 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003327
3328 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003329 * If sync wakeup then subtract the (maximum possible)
3330 * effect of the currently running task from the load
3331 * of the current CPU:
3332 */
Peter Zijlstra83378262008-06-27 13:41:37 +02003333 if (sync) {
3334 tg = task_group(current);
3335 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003336
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003337 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02003338 load += effective_load(tg, prev_cpu, 0, -weight);
3339 }
3340
3341 tg = task_group(p);
3342 weight = p->se.load.weight;
3343
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003344 /*
3345 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003346 * due to the sync cause above having dropped this_load to 0, we'll
3347 * always have an imbalance, but there's really nothing you can do
3348 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003349 *
3350 * Otherwise check if either cpus are near enough in load to allow this
3351 * task to be woken on this_cpu.
3352 */
Paul Turnere37b6a72011-01-21 20:44:59 -08003353 if (this_load > 0) {
3354 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02003355
3356 this_eff_load = 100;
3357 this_eff_load *= power_of(prev_cpu);
3358 this_eff_load *= this_load +
3359 effective_load(tg, this_cpu, weight, weight);
3360
3361 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3362 prev_eff_load *= power_of(this_cpu);
3363 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3364
3365 balanced = this_eff_load <= prev_eff_load;
3366 } else
3367 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003368
3369 /*
3370 * If the currently running task will sleep within
3371 * a reasonable amount of time then attract this newly
3372 * woken task:
3373 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02003374 if (sync && balanced)
3375 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003376
Lucas De Marchi41acab82010-03-10 23:37:45 -03003377 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003378 tl_per_task = cpu_avg_load_per_task(this_cpu);
3379
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003380 if (balanced ||
3381 (this_load <= load &&
3382 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003383 /*
3384 * This domain has SD_WAKE_AFFINE and
3385 * p is cache cold in this domain, and
3386 * there is no bad imbalance.
3387 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003388 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003389 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003390
3391 return 1;
3392 }
3393 return 0;
3394}
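/*
 * For illustration (hypothetical numbers): with imbalance_pct = 125 and
 * equal cpu_power on both cpus, waking a weight-1024 task here is judged
 * balanced when
 *
 *   100 * (this_load + effective_load(tg, this_cpu, 1024, 1024))
 *          <= 112 * (load + effective_load(tg, prev_cpu, 0, 1024))
 *
 * i.e. the prospective local load may exceed the previous cpu's load by
 * roughly 12% before the affine wakeup is rejected.
 */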
3395
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003396/*
3397 * find_idlest_group finds and returns the least busy CPU group within the
3398 * domain.
3399 */
3400static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02003401find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003402 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01003403{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07003404 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003405 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003406 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003407
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003408 do {
3409 unsigned long load, avg_load;
3410 int local_group;
3411 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003412
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003413 /* Skip over this group if it has no CPUs allowed */
3414 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003415 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003416 continue;
3417
3418 local_group = cpumask_test_cpu(this_cpu,
3419 sched_group_cpus(group));
3420
3421 /* Tally up the load of all CPUs in the group */
3422 avg_load = 0;
3423
3424 for_each_cpu(i, sched_group_cpus(group)) {
3425 /* Bias balancing toward cpus of our domain */
3426 if (local_group)
3427 load = source_load(i, load_idx);
3428 else
3429 load = target_load(i, load_idx);
3430
3431 avg_load += load;
3432 }
3433
3434 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02003435 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003436
3437 if (local_group) {
3438 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003439 } else if (avg_load < min_load) {
3440 min_load = avg_load;
3441 idlest = group;
3442 }
3443 } while (group = group->next, group != sd->groups);
3444
3445 if (!idlest || 100*this_load < imbalance*min_load)
3446 return NULL;
3447 return idlest;
3448}
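/*
 * For illustration: with imbalance_pct = 125 the local group is kept
 * (NULL is returned) unless the idlest remote group's average load is
 * more than about 11% below the local group's (100 * this_load >=
 * 112 * min_load), which keeps fork/exec placement from bouncing on
 * small load differences.
 */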
3449
3450/*
3451 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3452 */
3453static int
3454find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3455{
3456 unsigned long load, min_load = ULONG_MAX;
3457 int idlest = -1;
3458 int i;
3459
3460 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003461 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003462 load = weighted_cpuload(i);
3463
3464 if (load < min_load || (load == min_load && i == this_cpu)) {
3465 min_load = load;
3466 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003467 }
3468 }
3469
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003470 return idlest;
3471}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003472
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003473/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003474 * Try and locate an idle CPU in the sched_domain.
3475 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003476static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003477{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003478 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07003479 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003480 int i = task_cpu(p);
3481
3482 if (idle_cpu(target))
3483 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003484
3485 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003486 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003487 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003488 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3489 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003490
3491 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07003492 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003493 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01003494 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08003495 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07003496 sg = sd->groups;
3497 do {
3498 if (!cpumask_intersects(sched_group_cpus(sg),
3499 tsk_cpus_allowed(p)))
3500 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02003501
Linus Torvalds37407ea2012-09-16 12:29:43 -07003502 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003503 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07003504 goto next;
3505 }
3506
3507 target = cpumask_first_and(sched_group_cpus(sg),
3508 tsk_cpus_allowed(p));
3509 goto done;
3510next:
3511 sg = sg->next;
3512 } while (sg != sd->groups);
3513 }
3514done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003515 return target;
3516}
3517
3518/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003519 * select_task_rq_fair: balance the current task (running on cpu) in domains
3520 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
3521 * SD_BALANCE_EXEC.
3522 *
3523 * Balance, ie. select the least loaded group.
3524 *
3525 * Returns the target CPU number, or the same CPU if no balancing is needed.
3526 *
3527 * preempt must be disabled.
3528 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01003529static int
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003530select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003531{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003532 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003533 int cpu = smp_processor_id();
3534 int prev_cpu = task_cpu(p);
3535 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003536 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003537 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003538
Peter Zijlstra29baa742012-04-23 12:11:21 +02003539 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01003540 return prev_cpu;
3541
Peter Zijlstra0763a662009-09-14 19:37:39 +02003542 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003543 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003544 want_affine = 1;
3545 new_cpu = prev_cpu;
3546 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01003547
Peter Zijlstradce840a2011-04-07 14:09:50 +02003548 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003549 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f42882009-12-16 18:04:34 +01003550 if (!(tmp->flags & SD_LOAD_BALANCE))
3551 continue;
3552
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003553 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003554 * If both cpu and prev_cpu are part of this domain,
3555 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01003556 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003557 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3558 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3559 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08003560 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003561 }
3562
Alex Shif03542a2012-07-26 08:55:34 +08003563 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003564 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003565 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003566
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003567 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08003568 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02003569 prev_cpu = cpu;
3570
3571 new_cpu = select_idle_sibling(p, prev_cpu);
3572 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003573 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02003574
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003575 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003576 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003577 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003578 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003579
Peter Zijlstra0763a662009-09-14 19:37:39 +02003580 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003581 sd = sd->child;
3582 continue;
3583 }
3584
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003585 if (sd_flag & SD_BALANCE_WAKE)
3586 load_idx = sd->wake_idx;
3587
3588 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003589 if (!group) {
3590 sd = sd->child;
3591 continue;
3592 }
3593
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02003594 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003595 if (new_cpu == -1 || new_cpu == cpu) {
3596 /* Now try balancing at a lower domain level of cpu */
3597 sd = sd->child;
3598 continue;
3599 }
3600
3601 /* Now try balancing at a lower domain level of new_cpu */
3602 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003603 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003604 sd = NULL;
3605 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003606 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003607 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02003608 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003609 sd = tmp;
3610 }
3611 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01003612 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02003613unlock:
3614 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01003615
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003616 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003617}
Paul Turner0a74bef2012-10-04 13:18:30 +02003618
3619/*
3620 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3621 * cfs_rq_of(p) references at time of call are still valid and identify the
3622 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3623 * other assumptions, including the state of rq->lock, should be made.
3624 */
3625static void
3626migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3627{
Paul Turneraff3e492012-10-04 13:18:30 +02003628 struct sched_entity *se = &p->se;
3629 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3630
3631 /*
3632 * Load tracking: accumulate removed load so that it can be processed
3633 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3634 * to blocked load iff they have a positive decay-count. It can never
3635 * be negative here since on-rq tasks have decay-count == 0.
3636 */
3637 if (se->avg.decay_count) {
3638 se->avg.decay_count = -__synchronize_entity_decay(se);
Alex Shi25099402013-06-20 10:18:55 +08003639 atomic_long_add(se->avg.load_avg_contrib,
3640 &cfs_rq->removed_load);
Paul Turneraff3e492012-10-04 13:18:30 +02003641 }
Paul Turner0a74bef2012-10-04 13:18:30 +02003642}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003643#endif /* CONFIG_SMP */
3644
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003645static unsigned long
3646wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003647{
3648 unsigned long gran = sysctl_sched_wakeup_granularity;
3649
3650 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003651 * Since it's curr that is running now, convert the gran from real-time
 3652 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01003653 *
3654 * By using 'se' instead of 'curr' we penalize light tasks, so
3655 * they get preempted easier. That is, if 'se' < 'curr' then
3656 * the resulting gran will be larger, therefore penalizing the
3657 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3658 * be smaller, again penalizing the lighter task.
3659 *
3660 * This is especially important for buddies when the leftmost
3661 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003662 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08003663 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003664}
3665
3666/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02003667 * Should 'se' preempt 'curr'.
3668 *
3669 * |s1
3670 * |s2
3671 * |s3
3672 * g
3673 * |<--->|c
3674 *
3675 * w(c, s1) = -1
3676 * w(c, s2) = 0
3677 * w(c, s3) = 1
3678 *
3679 */
3680static int
3681wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3682{
3683 s64 gran, vdiff = curr->vruntime - se->vruntime;
3684
3685 if (vdiff <= 0)
3686 return -1;
3687
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003688 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02003689 if (vdiff > gran)
3690 return 1;
3691
3692 return 0;
3693}
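/*
 * For illustration: taking sysctl_sched_wakeup_granularity = 1ms and a
 * nice-0 wakee (so the vruntime gran is also 1ms), a wakee whose vruntime
 * trails curr's by 1.2ms yields 1 (preempt); a wakee whose vruntime is at
 * or beyond curr's yields -1, and a deficit smaller than the gran yields
 * 0, so curr keeps running in those cases.
 */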
3694
Peter Zijlstra02479092008-11-04 21:25:10 +01003695static void set_last_buddy(struct sched_entity *se)
3696{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003697 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3698 return;
3699
3700 for_each_sched_entity(se)
3701 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003702}
3703
3704static void set_next_buddy(struct sched_entity *se)
3705{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003706 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3707 return;
3708
3709 for_each_sched_entity(se)
3710 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003711}
3712
Rik van Rielac53db52011-02-01 09:51:03 -05003713static void set_skip_buddy(struct sched_entity *se)
3714{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003715 for_each_sched_entity(se)
3716 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05003717}
3718
Peter Zijlstra464b7522008-10-24 11:06:15 +02003719/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003720 * Preempt the current task with a newly woken task if needed:
3721 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02003722static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003723{
3724 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02003725 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003726 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02003727 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003728 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003729
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01003730 if (unlikely(se == pse))
3731 return;
3732
Paul Turner5238cdd2011-07-21 09:43:37 -07003733 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003734 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07003735 * unconditionally check_preempt_curr() after an enqueue (which may have
 3736 * led to a throttle). This both saves work and prevents false
3737 * next-buddy nomination below.
3738 */
3739 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3740 return;
3741
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003742 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02003743 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003744 next_buddy_marked = 1;
3745 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02003746
Bharata B Raoaec0a512008-08-28 14:42:49 +05303747 /*
 3748 * We can come here with TIF_NEED_RESCHED already set from the new-task
 3749 * wakeup path.
Paul Turner5238cdd2011-07-21 09:43:37 -07003750 *
3751 * Note: this also catches the edge-case of curr being in a throttled
3752 * group (e.g. via set_curr_task), since update_curr() (in the
3753 * enqueue of curr) will have resulted in resched being set. This
3754 * prevents us from potentially nominating it as a false LAST_BUDDY
3755 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05303756 */
3757 if (test_tsk_need_resched(curr))
3758 return;
3759
Darren Harta2f5c9a2011-02-22 13:04:33 -08003760 /* Idle tasks are by definition preempted by non-idle tasks. */
3761 if (unlikely(curr->policy == SCHED_IDLE) &&
3762 likely(p->policy != SCHED_IDLE))
3763 goto preempt;
3764
Ingo Molnar91c234b2007-10-15 17:00:18 +02003765 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08003766 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3767 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02003768 */
Ingo Molnar8ed92e52012-10-14 14:28:50 +02003769 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02003770 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003771
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003772 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07003773 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003774 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003775 if (wakeup_preempt_entity(se, pse) == 1) {
3776 /*
3777 * Bias pick_next to pick the sched entity that is
3778 * triggering this preemption.
3779 */
3780 if (!next_buddy_marked)
3781 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003782 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003783 }
Jupyung Leea65ac742009-11-17 18:51:40 +09003784
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003785 return;
3786
3787preempt:
3788 resched_task(curr);
3789 /*
3790 * Only set the backward buddy when the current task is still
3791 * on the rq. This can happen when a wakeup gets interleaved
3792 * with schedule on the ->pre_schedule() or idle_balance()
 3793 * point, either of which can drop the rq lock.
3794 *
3795 * Also, during early boot the idle thread is in the fair class,
 3796 * for obvious reasons it's a bad idea to schedule back to it.
3797 */
3798 if (unlikely(!se->on_rq || curr == rq->idle))
3799 return;
3800
3801 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3802 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003803}
3804
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003805static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003806{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003807 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003808 struct cfs_rq *cfs_rq = &rq->cfs;
3809 struct sched_entity *se;
3810
Tim Blechmann36ace272009-11-24 11:55:45 +01003811 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003812 return NULL;
3813
3814 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02003815 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003816 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003817 cfs_rq = group_cfs_rq(se);
3818 } while (cfs_rq);
3819
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003820 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003821 if (hrtick_enabled(rq))
3822 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003823
3824 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003825}
3826
3827/*
3828 * Account for a descheduled task:
3829 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02003830static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003831{
3832 struct sched_entity *se = &prev->se;
3833 struct cfs_rq *cfs_rq;
3834
3835 for_each_sched_entity(se) {
3836 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02003837 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003838 }
3839}
3840
Rik van Rielac53db52011-02-01 09:51:03 -05003841/*
3842 * sched_yield() is very simple
3843 *
3844 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3845 */
3846static void yield_task_fair(struct rq *rq)
3847{
3848 struct task_struct *curr = rq->curr;
3849 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3850 struct sched_entity *se = &curr->se;
3851
3852 /*
3853 * Are we the only task in the tree?
3854 */
3855 if (unlikely(rq->nr_running == 1))
3856 return;
3857
3858 clear_buddies(cfs_rq, se);
3859
3860 if (curr->policy != SCHED_BATCH) {
3861 update_rq_clock(rq);
3862 /*
3863 * Update run-time statistics of the 'current'.
3864 */
3865 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01003866 /*
3867 * Tell update_rq_clock() that we've just updated,
3868 * so we don't do microscopic update in schedule()
3869 * and double the fastpath cost.
3870 */
3871 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05003872 }
3873
3874 set_skip_buddy(se);
3875}
3876
Mike Galbraithd95f4122011-02-01 09:50:51 -05003877static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3878{
3879 struct sched_entity *se = &p->se;
3880
Paul Turner5238cdd2011-07-21 09:43:37 -07003881 /* throttled hierarchies are not runnable */
3882 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05003883 return false;
3884
3885 /* Tell the scheduler that we'd really like pse to run next. */
3886 set_next_buddy(se);
3887
Mike Galbraithd95f4122011-02-01 09:50:51 -05003888 yield_task_fair(rq);
3889
3890 return true;
3891}
3892
Peter Williams681f3e62007-10-24 18:23:51 +02003893#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003894/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02003895 * Fair scheduling class load-balancing methods.
3896 *
3897 * BASICS
3898 *
3899 * The purpose of load-balancing is to achieve the same basic fairness the
3900 * per-cpu scheduler provides, namely provide a proportional amount of compute
3901 * time to each task. This is expressed in the following equation:
3902 *
3903 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3904 *
3905 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3906 * W_i,0 is defined as:
3907 *
3908 * W_i,0 = \Sum_j w_i,j (2)
3909 *
3910 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3911 * is derived from the nice value as per prio_to_weight[].
3912 *
3913 * The weight average is an exponential decay average of the instantaneous
3914 * weight:
3915 *
3916 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3917 *
3918 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3919 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3920 * can also include other factors [XXX].
3921 *
3922 * To achieve this balance we define a measure of imbalance which follows
3923 * directly from (1):
3924 *
3925 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3926 *
 3927 * We then move tasks around to minimize the imbalance. In the continuous
 3928 * function space it is obvious this converges; in the discrete case we get
3929 * a few fun cases generally called infeasible weight scenarios.
3930 *
3931 * [XXX expand on:
3932 * - infeasible weights;
3933 * - local vs global optima in the discrete case. ]
3934 *
3935 *
3936 * SCHED DOMAINS
3937 *
3938 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3939 * for all i,j solution, we create a tree of cpus that follows the hardware
3940 * topology where each level pairs two lower groups (or better). This results
3941 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3942 * tree to only the first of the previous level and we decrease the frequency
3943 * of load-balance at each level inv. proportional to the number of cpus in
3944 * the groups.
3945 *
3946 * This yields:
3947 *
3948 * log_2 n 1 n
3949 * \Sum { --- * --- * 2^i } = O(n) (5)
3950 * i = 0 2^i 2^i
3951 * `- size of each group
3952 * | | `- number of cpus doing load-balance
3953 * | `- freq
3954 * `- sum over all levels
3955 *
3956 * Coupled with a limit on how many tasks we can migrate every balance pass,
3957 * this makes (5) the runtime complexity of the balancer.
3958 *
3959 * An important property here is that each CPU is still (indirectly) connected
3960 * to every other cpu in at most O(log n) steps:
3961 *
3962 * The adjacency matrix of the resulting graph is given by:
3963 *
3964 * log_2 n
3965 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
3966 * k = 0
3967 *
3968 * And you'll find that:
3969 *
3970 * A^(log_2 n)_i,j != 0 for all i,j (7)
3971 *
3972 * Showing there's indeed a path between every cpu in at most O(log n) steps.
3973 * The task movement gives a factor of O(m), giving a convergence complexity
3974 * of:
3975 *
3976 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
3977 *
3978 *
3979 * WORK CONSERVING
3980 *
3981 * In order to avoid CPUs going idle while there's still work to do, new idle
3982 * balancing is more aggressive and has the newly idle cpu iterate up the domain
3983 * tree itself instead of relying on other CPUs to bring it work.
3984 *
3985 * This adds some complexity to both (5) and (8) but it reduces the total idle
3986 * time.
3987 *
3988 * [XXX more?]
3989 *
3990 *
3991 * CGROUPS
3992 *
3993 * Cgroups make a horror show out of (2), instead of a simple sum we get:
3994 *
3995 * s_k,i
3996 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
3997 * S_k
3998 *
3999 * Where
4000 *
4001 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
4002 *
4003 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4004 *
 4005 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
4006 * property.
4007 *
4008 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4009 * rewrite all of this once again.]
4010 */
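/*
 * For illustration, evaluating (5) for n = 4 cpus (log_2 n = 2):
 *
 *   i = 0:  1/1 * 4/1 * 1 = 4
 *   i = 1:  1/2 * 4/2 * 2 = 2
 *   i = 2:  1/4 * 4/4 * 4 = 1
 *
 * for a total of 7 <= 2n balance operations per full cycle, i.e. O(n).
 */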
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004011
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09004012static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4013
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004014#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01004015#define LBF_NEED_BREAK 0x02
Peter Zijlstra62633222013-08-19 12:41:09 +02004016#define LBF_DST_PINNED 0x04
4017#define LBF_SOME_PINNED 0x08
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004018
4019struct lb_env {
4020 struct sched_domain *sd;
4021
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004022 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05304023 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004024
4025 int dst_cpu;
4026 struct rq *dst_rq;
4027
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304028 struct cpumask *dst_grpmask;
4029 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004030 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004031 long imbalance;
Michael Wangb9403132012-07-12 16:10:13 +08004032 /* The set of CPUs under consideration for load-balancing */
4033 struct cpumask *cpus;
4034
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004035 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004036
4037 unsigned int loop;
4038 unsigned int loop_break;
4039 unsigned int loop_max;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004040};
4041
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004042/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004043 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004044 * Both runqueues must be locked.
4045 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004046static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004047{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004048 deactivate_task(env->src_rq, p, 0);
4049 set_task_cpu(p, env->dst_cpu);
4050 activate_task(env->dst_rq, p, 0);
4051 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004052}
4053
4054/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02004055 * Is this task likely cache-hot:
4056 */
4057static int
4058task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4059{
4060 s64 delta;
4061
4062 if (p->sched_class != &fair_sched_class)
4063 return 0;
4064
4065 if (unlikely(p->policy == SCHED_IDLE))
4066 return 0;
4067
4068 /*
4069 * Buddy candidates are cache hot:
4070 */
4071 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4072 (&p->se == cfs_rq_of(&p->se)->next ||
4073 &p->se == cfs_rq_of(&p->se)->last))
4074 return 1;
4075
4076 if (sysctl_sched_migration_cost == -1)
4077 return 1;
4078 if (sysctl_sched_migration_cost == 0)
4079 return 0;
4080
4081 delta = now - p->se.exec_start;
4082
4083 return delta < (s64)sysctl_sched_migration_cost;
4084}
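/*
 * E.g. with the default sysctl_sched_migration_cost of 500000ns (0.5ms), a
 * fair task whose exec_start was 200us ago is still considered cache-hot
 * here, while one that last ran 2ms ago is not.  Setting the sysctl to -1
 * treats every task as hot, 0 treats none as hot.
 */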
4085
Mel Gorman3a7053b2013-10-07 11:29:00 +01004086#ifdef CONFIG_NUMA_BALANCING
4087/* Returns true if the destination node has incurred more faults */
4088static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4089{
4090 int src_nid, dst_nid;
4091
4092 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
4093 !(env->sd->flags & SD_NUMA)) {
4094 return false;
4095 }
4096
4097 src_nid = cpu_to_node(env->src_cpu);
4098 dst_nid = cpu_to_node(env->dst_cpu);
4099
4100 if (src_nid == dst_nid ||
4101 p->numa_migrate_seq >= sysctl_numa_balancing_settle_count)
4102 return false;
4103
4104 if (dst_nid == p->numa_preferred_nid ||
4105 p->numa_faults[dst_nid] > p->numa_faults[src_nid])
4106 return true;
4107
4108 return false;
4109}
4110#else
4111static inline bool migrate_improves_locality(struct task_struct *p,
4112 struct lb_env *env)
4113{
4114 return false;
4115}
4116#endif
4117
Peter Zijlstra029632f2011-10-25 10:00:11 +02004118/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004119 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
4120 */
4121static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004122int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004123{
4124 int tsk_cache_hot = 0;
4125 /*
4126 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09004127 * 1) throttled (see throttled_lb_pair()), or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004128 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09004129 * 3) running (obviously), or
4130 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004131 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09004132 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4133 return 0;
4134
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004135 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004136 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304137
Lucas De Marchi41acab82010-03-10 23:37:45 -03004138 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304139
Peter Zijlstra62633222013-08-19 12:41:09 +02004140 env->flags |= LBF_SOME_PINNED;
4141
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304142 /*
4143 * Remember if this task can be migrated to any other cpu in
4144 * our sched_group. We may want to revisit it if we couldn't
4145 * meet load balance goals by pulling other tasks on src_cpu.
4146 *
4147 * Also avoid computing new_dst_cpu if we have already computed
4148 * one in current iteration.
4149 */
Peter Zijlstra62633222013-08-19 12:41:09 +02004150 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304151 return 0;
4152
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004153 /* Prevent to re-select dst_cpu via env's cpus */
4154 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4155 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
Peter Zijlstra62633222013-08-19 12:41:09 +02004156 env->flags |= LBF_DST_PINNED;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004157 env->new_dst_cpu = cpu;
4158 break;
4159 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304160 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004161
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004162 return 0;
4163 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304164
4165 /* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004166 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004167
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004168 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03004169 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004170 return 0;
4171 }
4172
4173 /*
4174 * Aggressive migration if:
Mel Gorman3a7053b2013-10-07 11:29:00 +01004175 * 1) the destination numa node is preferred, or
4176 * 2) task is cache cold, or
4177 * 3) too many balance attempts have failed.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004178 */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004179 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Mel Gorman3a7053b2013-10-07 11:29:00 +01004180
4181 if (migrate_improves_locality(p, env)) {
4182#ifdef CONFIG_SCHEDSTATS
4183 if (tsk_cache_hot) {
4184 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4185 schedstat_inc(p, se.statistics.nr_forced_migrations);
4186 }
4187#endif
4188 return 1;
4189 }
4190
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004191 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004192 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004193
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004194 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004195 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03004196 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004197 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004198
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004199 return 1;
4200 }
4201
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004202 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4203 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004204}
4205
Peter Zijlstra897c3952009-12-17 17:45:42 +01004206/*
4207 * move_one_task tries to move exactly one task from busiest to this_rq, as
4208 * part of active balancing operations within "domain".
4209 * Returns 1 if successful and 0 otherwise.
4210 *
4211 * Called with both runqueues locked.
4212 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004213static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01004214{
4215 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004216
Peter Zijlstra367456c2012-02-20 21:49:09 +01004217 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01004218 if (!can_migrate_task(p, env))
4219 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004220
Peter Zijlstra367456c2012-02-20 21:49:09 +01004221 move_task(p, env);
4222 /*
4223 * Right now, this is only the second place move_task()
4224 * is called, so we can safely collect move_task()
4225 * stats here rather than inside move_task().
4226 */
4227 schedstat_inc(env->sd, lb_gained[env->idle]);
4228 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004229 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01004230 return 0;
4231}
4232
Peter Zijlstra367456c2012-02-20 21:49:09 +01004233static unsigned long task_h_load(struct task_struct *p);
4234
Peter Zijlstraeb953082012-04-17 13:38:40 +02004235static const unsigned int sched_nr_migrate_break = 32;
4236
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004237/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004238 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004239 * this_rq, as part of a balancing operation within domain "sd".
4240 * Returns 1 if successful and 0 otherwise.
4241 *
4242 * Called with both runqueues locked.
4243 */
4244static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004245{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004246 struct list_head *tasks = &env->src_rq->cfs_tasks;
4247 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004248 unsigned long load;
4249 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004250
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004251 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004252 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004253
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004254 while (!list_empty(tasks)) {
4255 p = list_first_entry(tasks, struct task_struct, se.group_node);
4256
Peter Zijlstra367456c2012-02-20 21:49:09 +01004257 env->loop++;
4258 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004259 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004260 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004261
4262 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01004263 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02004264 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004265 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01004266 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02004267 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004268
Joonsoo Kimd3198082013-04-23 17:27:40 +09004269 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01004270 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004271
Peter Zijlstra367456c2012-02-20 21:49:09 +01004272 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004273
Peter Zijlstraeb953082012-04-17 13:38:40 +02004274 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004275 goto next;
4276
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004277 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004278 goto next;
4279
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004280 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01004281 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004282 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004283
4284#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01004285 /*
4286 * NEWIDLE balancing is a source of latency, so preemptible
4287 * kernels will stop after the first task is pulled to minimize
4288 * the critical section.
4289 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004290 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004291 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004292#endif
4293
Peter Zijlstraee00e662009-12-17 17:25:20 +01004294 /*
4295 * We only want to steal up to the prescribed amount of
4296 * weighted load.
4297 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004298 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004299 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004300
Peter Zijlstra367456c2012-02-20 21:49:09 +01004301 continue;
4302next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004303 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004304 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004305
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004306 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004307 * Right now, this is one of only two places move_task() is called,
4308 * so we can safely collect move_task() stats here rather than
4309 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004310 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004311 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004312
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004313 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004314}
4315
Peter Zijlstra230059de2009-12-17 17:47:12 +01004316#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004317/*
4318 * update tg->load_weight by folding this cpu's load_avg
4319 */
Paul Turner48a16752012-10-04 13:18:31 +02004320static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004321{
Paul Turner48a16752012-10-04 13:18:31 +02004322 struct sched_entity *se = tg->se[cpu];
4323 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004324
Paul Turner48a16752012-10-04 13:18:31 +02004325 /* throttled entities do not contribute to load */
4326 if (throttled_hierarchy(cfs_rq))
4327 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004328
Paul Turneraff3e492012-10-04 13:18:30 +02004329 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004330
Paul Turner82958362012-10-04 13:18:31 +02004331 if (se) {
4332 update_entity_load_avg(se, 1);
4333 /*
4334 * We pivot on our runnable average having decayed to zero for
4335 * list removal. This generally implies that all our children
4336 * have also been removed (modulo rounding error or bandwidth
4337 * control); however, such cases are rare and we can fix these
4338 * at enqueue.
4339 *
4340 * TODO: fix up out-of-order children on enqueue.
4341 */
4342 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4343 list_del_leaf_cfs_rq(cfs_rq);
4344 } else {
Paul Turner48a16752012-10-04 13:18:31 +02004345 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02004346 update_rq_runnable_avg(rq, rq->nr_running);
4347 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004348}
4349
Paul Turner48a16752012-10-04 13:18:31 +02004350static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004351{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004352 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02004353 struct cfs_rq *cfs_rq;
4354 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004355
Paul Turner48a16752012-10-04 13:18:31 +02004356 raw_spin_lock_irqsave(&rq->lock, flags);
4357 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02004358 /*
4359 * Iterates the task_group tree in a bottom up fashion, see
4360 * list_add_leaf_cfs_rq() for details.
4361 */
Paul Turner64660c82011-07-21 09:43:36 -07004362 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02004363 /*
4364 * Note: We may want to consider periodically releasing
4365 * rq->lock around these updates so that creating many task
4366 * groups does not result in continually extending hold time.
4367 */
4368 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07004369 }
Paul Turner48a16752012-10-04 13:18:31 +02004370
4371 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004372}
4373
Peter Zijlstra9763b672011-07-13 13:09:25 +02004374/*
Vladimir Davydov68520792013-07-15 17:49:19 +04004375 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
Peter Zijlstra9763b672011-07-13 13:09:25 +02004376 * This needs to be done in a top-down fashion because the load of a child
4377 * group is a fraction of its parent's load.
4378 */
Vladimir Davydov68520792013-07-15 17:49:19 +04004379static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
Peter Zijlstra9763b672011-07-13 13:09:25 +02004380{
Vladimir Davydov68520792013-07-15 17:49:19 +04004381 struct rq *rq = rq_of(cfs_rq);
4382 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004383 unsigned long now = jiffies;
Vladimir Davydov68520792013-07-15 17:49:19 +04004384 unsigned long load;
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004385
Vladimir Davydov68520792013-07-15 17:49:19 +04004386 if (cfs_rq->last_h_load_update == now)
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004387 return;
4388
Vladimir Davydov68520792013-07-15 17:49:19 +04004389 cfs_rq->h_load_next = NULL;
4390 for_each_sched_entity(se) {
4391 cfs_rq = cfs_rq_of(se);
4392 cfs_rq->h_load_next = se;
4393 if (cfs_rq->last_h_load_update == now)
4394 break;
4395 }
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004396
Vladimir Davydov68520792013-07-15 17:49:19 +04004397 if (!se) {
Vladimir Davydov7e3115e2013-09-14 19:39:46 +04004398 cfs_rq->h_load = cfs_rq->runnable_load_avg;
Vladimir Davydov68520792013-07-15 17:49:19 +04004399 cfs_rq->last_h_load_update = now;
4400 }
4401
4402 while ((se = cfs_rq->h_load_next) != NULL) {
4403 load = cfs_rq->h_load;
4404 load = div64_ul(load * se->avg.load_avg_contrib,
4405 cfs_rq->runnable_load_avg + 1);
4406 cfs_rq = group_cfs_rq(se);
4407 cfs_rq->h_load = load;
4408 cfs_rq->last_h_load_update = now;
4409 }
Peter Zijlstra9763b672011-07-13 13:09:25 +02004410}
4411
Peter Zijlstra367456c2012-02-20 21:49:09 +01004412static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004413{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004414 struct cfs_rq *cfs_rq = task_cfs_rq(p);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004415
Vladimir Davydov68520792013-07-15 17:49:19 +04004416 update_cfs_rq_h_load(cfs_rq);
Alex Shia003a252013-06-20 10:18:51 +08004417 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4418 cfs_rq->runnable_load_avg + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004419}
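/*
 * E.g. (illustrative numbers): if update_cfs_rq_h_load() left the task's
 * cfs_rq with h_load = 1024 and runnable_load_avg = 2047, a task whose
 * load_avg_contrib is 512 yields task_h_load(p) = 512 * 1024 / 2048 = 256,
 * i.e. the task's contribution is scaled by the fraction of the hierarchical
 * load this cfs_rq represents.
 */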
4420#else
Paul Turner48a16752012-10-04 13:18:31 +02004421static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004422{
4423}
4424
Peter Zijlstra367456c2012-02-20 21:49:09 +01004425static unsigned long task_h_load(struct task_struct *p)
4426{
Alex Shia003a252013-06-20 10:18:51 +08004427 return p->se.avg.load_avg_contrib;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004428}
4429#endif
4430
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004431/********** Helpers for find_busiest_group ************************/
4432/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004433 * sg_lb_stats - stats of a sched_group required for load_balancing
4434 */
4435struct sg_lb_stats {
4436 unsigned long avg_load; /* Avg load across the CPUs of the group */
4437 unsigned long group_load; /* Total load over the CPUs of the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004438 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004439 unsigned long load_per_task;
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004440 unsigned long group_power;
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004441 unsigned int sum_nr_running; /* Nr tasks running in the group */
4442 unsigned int group_capacity;
4443 unsigned int idle_cpus;
4444 unsigned int group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004445 int group_imb; /* Is there an imbalance in the group ? */
Nikhil Raofab47622010-10-15 13:12:29 -07004446 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004447};
4448
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004449/*
4450 * sd_lb_stats - Structure to store the statistics of a sched_domain
4451 * during load balancing.
4452 */
4453struct sd_lb_stats {
4454 struct sched_group *busiest; /* Busiest group in this sd */
4455 struct sched_group *local; /* Local group in this sd */
4456 unsigned long total_load; /* Total load of all groups in sd */
4457 unsigned long total_pwr; /* Total power of all groups in sd */
4458 unsigned long avg_load; /* Average load across all groups in sd */
4459
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004460 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004461 struct sg_lb_stats local_stat; /* Statistics of the local group */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004462};
4463
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004464static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
4465{
4466 /*
4467 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
4468 * local_stat because update_sg_lb_stats() does a full clear/assignment.
4469 * We must however clear busiest_stat::avg_load because
4470 * update_sd_pick_busiest() reads this before assignment.
4471 */
4472 *sds = (struct sd_lb_stats){
4473 .busiest = NULL,
4474 .local = NULL,
4475 .total_load = 0UL,
4476 .total_pwr = 0UL,
4477 .busiest_stat = {
4478 .avg_load = 0UL,
4479 },
4480 };
4481}
4482
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004483/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004484 * get_sd_load_idx - Obtain the load index for a given sched domain.
4485 * @sd: The sched_domain whose load_idx is to be obtained.
4486 * @idle: The idle status of the CPU whose sd load_idx is obtained.
Yacine Belkadie69f6182013-07-12 20:45:47 +02004487 *
4488 * Return: The load index.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004489 */
4490static inline int get_sd_load_idx(struct sched_domain *sd,
4491 enum cpu_idle_type idle)
4492{
4493 int load_idx;
4494
4495 switch (idle) {
4496 case CPU_NOT_IDLE:
4497 load_idx = sd->busy_idx;
4498 break;
4499
4500 case CPU_NEWLY_IDLE:
4501 load_idx = sd->newidle_idx;
4502 break;
4503 default:
4504 load_idx = sd->idle_idx;
4505 break;
4506 }
4507
4508 return load_idx;
4509}
4510
Li Zefan15f803c2013-03-05 16:07:11 +08004511static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004512{
Nikhil Rao1399fa72011-05-18 10:09:39 -07004513 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004514}
4515
4516unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4517{
4518 return default_scale_freq_power(sd, cpu);
4519}
4520
Li Zefan15f803c2013-03-05 16:07:11 +08004521static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004522{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004523 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004524 unsigned long smt_gain = sd->smt_gain;
4525
4526 smt_gain /= weight;
4527
4528 return smt_gain;
4529}
4530
4531unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4532{
4533 return default_scale_smt_power(sd, cpu);
4534}
4535
Li Zefan15f803c2013-03-05 16:07:11 +08004536static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004537{
4538 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004539 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004540
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004541 /*
4542 * Since we're reading these variables without serialization make sure
4543 * we read them once before doing sanity checks on them.
4544 */
4545 age_stamp = ACCESS_ONCE(rq->age_stamp);
4546 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004547
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004548 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004549
4550 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004551 /* Ensures that power won't end up being negative */
4552 available = 0;
4553 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004554 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004555 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004556
Nikhil Rao1399fa72011-05-18 10:09:39 -07004557 if (unlikely((s64)total < SCHED_POWER_SCALE))
4558 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004559
Nikhil Rao1399fa72011-05-18 10:09:39 -07004560 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004561
4562 return div_u64(available, total);
4563}
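/*
 * E.g. if RT and IRQ time accounted for ~25% of the averaging period,
 * available/total is ~0.75 and scale_rt_power() returns roughly
 * 3/4 * SCHED_POWER_SCALE = 768, which update_cpu_power() below folds
 * into the cpu's power.
 */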
4564
4565static void update_cpu_power(struct sched_domain *sd, int cpu)
4566{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004567 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07004568 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004569 struct sched_group *sdg = sd->groups;
4570
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004571 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4572 if (sched_feat(ARCH_POWER))
4573 power *= arch_scale_smt_power(sd, cpu);
4574 else
4575 power *= default_scale_smt_power(sd, cpu);
4576
Nikhil Rao1399fa72011-05-18 10:09:39 -07004577 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004578 }
4579
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004580 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004581
4582 if (sched_feat(ARCH_POWER))
4583 power *= arch_scale_freq_power(sd, cpu);
4584 else
4585 power *= default_scale_freq_power(sd, cpu);
4586
Nikhil Rao1399fa72011-05-18 10:09:39 -07004587 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004588
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004589 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004590 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004591
4592 if (!power)
4593 power = 1;
4594
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004595 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004596 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004597}
4598
Peter Zijlstra029632f2011-10-25 10:00:11 +02004599void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004600{
4601 struct sched_domain *child = sd->child;
4602 struct sched_group *group, *sdg = sd->groups;
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004603 unsigned long power, power_orig;
Vincent Guittot4ec44122011-12-12 20:21:08 +01004604 unsigned long interval;
4605
4606 interval = msecs_to_jiffies(sd->balance_interval);
4607 interval = clamp(interval, 1UL, max_load_balance_interval);
4608 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004609
4610 if (!child) {
4611 update_cpu_power(sd, cpu);
4612 return;
4613 }
4614
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004615 power_orig = power = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004616
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004617 if (child->flags & SD_OVERLAP) {
4618 /*
4619 * SD_OVERLAP domains cannot assume that child groups
4620 * span the current group.
4621 */
4622
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004623 for_each_cpu(cpu, sched_group_cpus(sdg)) {
4624 struct sched_group *sg = cpu_rq(cpu)->sd->groups;
4625
4626 power_orig += sg->sgp->power_orig;
4627 power += sg->sgp->power;
4628 }
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004629 } else {
4630 /*
4631 * !SD_OVERLAP domains can assume that child groups
4632 * span the current group.
4633 */
4634
4635 group = child->groups;
4636 do {
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004637 power_orig += group->sgp->power_orig;
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004638 power += group->sgp->power;
4639 group = group->next;
4640 } while (group != child->groups);
4641 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004642
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004643 sdg->sgp->power_orig = power_orig;
4644 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004645}
4646
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004647/*
4648 * Try and fix up capacity for tiny siblings; this is needed when
4649 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4650 * which on its own isn't powerful enough.
4651 *
4652 * See update_sd_pick_busiest() and check_asym_packing().
4653 */
4654static inline int
4655fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4656{
4657 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07004658 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004659 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02004660 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004661 return 0;
4662
4663 /*
4664 * If ~90% of the cpu_power is still there, we're good.
4665 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004666 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004667 return 1;
4668
4669 return 0;
4670}
4671
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004672/*
4673 * Group imbalance indicates (and tries to solve) the problem where balancing
4674 * groups is inadequate due to tsk_cpus_allowed() constraints.
4675 *
4676 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
4677 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
4678 * Something like:
4679 *
4680 * { 0 1 2 3 } { 4 5 6 7 }
4681 * * * * *
4682 *
4683 * If we were to balance group-wise we'd place two tasks in the first group and
4684 * two tasks in the second group. Clearly this is undesired as it will overload
4685 * cpu 3 and leave one of the cpus in the second group unused.
4686 *
4687 * The current solution to this issue is detecting the skew in the first group
Peter Zijlstra62633222013-08-19 12:41:09 +02004688 * by noticing the lower domain failed to reach balance and had difficulty
4689 * moving tasks due to affinity constraints.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004690 *
4691 * When this is so detected, this group becomes a candidate for busiest; see
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004692 * update_sd_pick_busiest(). calculate_imbalance() and
Peter Zijlstra62633222013-08-19 12:41:09 +02004693 * find_busiest_group() avoid some of the usual balance conditions to allow it
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004694 * to create an effective group imbalance.
4695 *
4696 * This is a somewhat tricky proposition since the next run might not find the
4697 * group imbalance and decide the groups need to be balanced again. A most
4698 * subtle and fragile situation.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004699 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004700
Peter Zijlstra62633222013-08-19 12:41:09 +02004701static inline int sg_imbalanced(struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004702{
Peter Zijlstra62633222013-08-19 12:41:09 +02004703 return group->sgp->imbalance;
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004704}
4705
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004706/*
4707 * Compute the group capacity.
4708 *
Peter Zijlstrac61037e2013-08-28 12:40:38 +02004709 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
4710 * first dividing out the smt factor and computing the actual number of cores
4711 * and limiting power unit capacity with that.
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004712 */
4713static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
4714{
Peter Zijlstrac61037e2013-08-28 12:40:38 +02004715 unsigned int capacity, smt, cpus;
4716 unsigned int power, power_orig;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004717
Peter Zijlstrac61037e2013-08-28 12:40:38 +02004718 power = group->sgp->power;
4719 power_orig = group->sgp->power_orig;
4720 cpus = group->group_weight;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004721
Peter Zijlstrac61037e2013-08-28 12:40:38 +02004722 /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
4723 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
4724 capacity = cpus / smt; /* cores */
4725
4726 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004727 if (!capacity)
4728 capacity = fix_small_capacity(env->sd, group);
4729
4730 return capacity;
4731}
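/*
 * E.g. (illustrative numbers): a single SMT-2 core with power_orig ~= 1178
 * (two siblings sharing an smt_gain slightly above SCHED_POWER_SCALE):
 * smt = DIV_ROUND_UP(2 * 1024, 1178) = 2, so capacity = 2 / 2 = 1 core,
 * which is then clamped by DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
 * the two sibling threads never count as two full cores.
 */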
4732
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004733/**
4734 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
4735 * @env: The load balancing environment.
4736 * @group: sched_group whose statistics are to be updated.
4737 * @load_idx: Load index of sched_domain of this_cpu for load calc.
4738 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004739 * @sgs: variable to hold the statistics for this group.
4740 */
4741static inline void update_sg_lb_stats(struct lb_env *env,
4742 struct sched_group *group, int load_idx,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09004743 int local_group, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004744{
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004745 unsigned long nr_running;
4746 unsigned long load;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004747 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004748
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004749 memset(sgs, 0, sizeof(*sgs));
4750
Michael Wangb9403132012-07-12 16:10:13 +08004751 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004752 struct rq *rq = cpu_rq(i);
4753
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004754 nr_running = rq->nr_running;
4755
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004756 /* Bias balancing toward cpus of our domain */
Peter Zijlstra62633222013-08-19 12:41:09 +02004757 if (local_group)
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004758 load = target_load(i, load_idx);
Peter Zijlstra62633222013-08-19 12:41:09 +02004759 else
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004760 load = source_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004761
4762 sgs->group_load += load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004763 sgs->sum_nr_running += nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004764 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004765 if (idle_cpu(i))
4766 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004767 }
4768
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004769 /* Adjust by relative CPU power of the group */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004770 sgs->group_power = group->sgp->power;
4771 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004772
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004773 if (sgs->sum_nr_running)
Peter Zijlstra38d0f772013-08-15 19:47:56 +02004774 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004775
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004776 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07004777
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004778 sgs->group_imb = sg_imbalanced(group);
4779 sgs->group_capacity = sg_capacity(env, group);
4780
Nikhil Raofab47622010-10-15 13:12:29 -07004781 if (sgs->group_capacity > sgs->sum_nr_running)
4782 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004783}
4784
4785/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10004786 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07004787 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004788 * @sds: sched_domain statistics
4789 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10004790 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10004791 *
4792 * Determine if @sg is a busier group than the previously selected
4793 * busiest group.
Yacine Belkadie69f6182013-07-12 20:45:47 +02004794 *
4795 * Return: %true if @sg is a busier group than the previously selected
4796 * busiest group. %false otherwise.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004797 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004798static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10004799 struct sd_lb_stats *sds,
4800 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004801 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004802{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004803 if (sgs->avg_load <= sds->busiest_stat.avg_load)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004804 return false;
4805
4806 if (sgs->sum_nr_running > sgs->group_capacity)
4807 return true;
4808
4809 if (sgs->group_imb)
4810 return true;
4811
4812 /*
4813 * ASYM_PACKING needs to move all the work to the lowest
4814 * numbered CPUs in the group, therefore mark all groups
4815 * higher than ourself as busy.
4816 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004817 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4818 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004819 if (!sds->busiest)
4820 return true;
4821
4822 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4823 return true;
4824 }
4825
4826 return false;
4827}
4828
4829/**
Hui Kang461819a2011-10-11 23:00:59 -04004830 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004831 * @env: The load balancing environment.
4833 * @sds: variable to hold the statistics for this sched_domain.
4834 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004835static inline void update_sd_lb_stats(struct lb_env *env,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09004836 struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004837{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004838 struct sched_domain *child = env->sd->child;
4839 struct sched_group *sg = env->sd->groups;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004840 struct sg_lb_stats tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004841 int load_idx, prefer_sibling = 0;
4842
4843 if (child && child->flags & SD_PREFER_SIBLING)
4844 prefer_sibling = 1;
4845
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004846 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004847
4848 do {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004849 struct sg_lb_stats *sgs = &tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004850 int local_group;
4851
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004852 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004853 if (local_group) {
4854 sds->local = sg;
4855 sgs = &sds->local_stat;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004856
4857 if (env->idle != CPU_NEWLY_IDLE ||
4858 time_after_eq(jiffies, sg->sgp->next_update))
4859 update_group_power(env->sd, env->dst_cpu);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004860 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004861
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004862 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004863
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004864 if (local_group)
4865 goto next_group;
4866
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004867 /*
4868 * In case the child domain prefers tasks go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10004869 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07004870 * and move all the excess tasks away. We lower the capacity
4871 * of a group only if the local group has the capacity to fit
4872 * these excess tasks, i.e. nr_running < group_capacity. The
4873 * extra check prevents the case where you always pull from the
4874 * heaviest group when it is already under-utilized (possible
4875 * with a large weight task outweighs the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004876 */
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004877 if (prefer_sibling && sds->local &&
4878 sds->local_stat.group_has_capacity)
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004879 sgs->group_capacity = min(sgs->group_capacity, 1U);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004880
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004881 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004882 sds->busiest = sg;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004883 sds->busiest_stat = *sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004884 }
4885
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004886next_group:
4887 /* Now, start updating sd_lb_stats */
4888 sds->total_load += sgs->group_load;
4889 sds->total_pwr += sgs->group_power;
4890
Michael Neuling532cb4c2010-06-08 14:57:02 +10004891 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004892 } while (sg != env->sd->groups);
Michael Neuling532cb4c2010-06-08 14:57:02 +10004893}
4894
Michael Neuling532cb4c2010-06-08 14:57:02 +10004895/**
4896 * check_asym_packing - Check to see if the group is packed into the
4897 * sched domain.
4898 *
4899 * This is primarily intended to be used at the sibling level. Some
4900 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4901 * case of POWER7, it can move to lower SMT modes only when higher
4902 * threads are idle. When in lower SMT modes, the threads will
4903 * perform better since they share less core resources. Hence when we
4904 * have idle threads, we want them to be the higher ones.
4905 *
4906 * This packing function is run on idle threads. It checks to see if
4907 * the busiest CPU in this domain (core in the P7 case) has a higher
4908 * CPU number than the packing function is being run on. Here we are
4909 * assuming a lower CPU number is equivalent to a lower SMT thread
4910 * number.
4911 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02004912 * Return: 1 when packing is required and a task should be moved to
Michael Neulingb6b12292010-06-10 12:06:21 +10004913 * this CPU. The amount of the imbalance is returned in env->imbalance.
4914 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004915 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004916 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10004917 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004918static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004919{
4920 int busiest_cpu;
4921
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004922 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004923 return 0;
4924
4925 if (!sds->busiest)
4926 return 0;
4927
4928 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004929 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004930 return 0;
4931
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004932 env->imbalance = DIV_ROUND_CLOSEST(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004933 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
4934 SCHED_POWER_SCALE);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004935
Michael Neuling532cb4c2010-06-08 14:57:02 +10004936 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004937}
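/*
 * E.g. if the busiest (higher-numbered) sibling carries avg_load = 2048 with
 * group_power = 1024, the packing imbalance above becomes
 * DIV_ROUND_CLOSEST(2048 * 1024, 1024) = 2048, i.e. the busiest group's
 * entire load is reported as the amount to move to the lower-numbered cpu.
 */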
4938
4939/**
4940 * fix_small_imbalance - Calculate the minor imbalance that exists
4941 * amongst the groups of a sched_domain, during
4942 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004943 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004944 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004945 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004946static inline
4947void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004948{
4949 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4950 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004951 unsigned long scaled_busy_load_per_task;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004952 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004953
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004954 local = &sds->local_stat;
4955 busiest = &sds->busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004956
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004957 if (!local->sum_nr_running)
4958 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
4959 else if (busiest->load_per_task > local->load_per_task)
4960 imbn = 1;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004961
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004962 scaled_busy_load_per_task =
4963 (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004964 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004965
Vladimir Davydov3029ede2013-09-15 17:49:14 +04004966 if (busiest->avg_load + scaled_busy_load_per_task >=
4967 local->avg_load + (scaled_busy_load_per_task * imbn)) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004968 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004969 return;
4970 }
4971
4972 /*
4973 * OK, we don't have enough imbalance to justify moving tasks,
4974 * however we may be able to increase total CPU power used by
4975 * moving them.
4976 */
4977
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004978 pwr_now += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004979 min(busiest->load_per_task, busiest->avg_load);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004980 pwr_now += local->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004981 min(local->load_per_task, local->avg_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004982 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004983
4984 /* Amount of load we'd subtract */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004985 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004986 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004987 if (busiest->avg_load > tmp) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004988 pwr_move += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004989 min(busiest->load_per_task,
4990 busiest->avg_load - tmp);
4991 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004992
4993 /* Amount of load we'd add */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004994 if (busiest->avg_load * busiest->group_power <
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004995 busiest->load_per_task * SCHED_POWER_SCALE) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004996 tmp = (busiest->avg_load * busiest->group_power) /
4997 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004998 } else {
4999 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005000 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005001 }
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005002 pwr_move += local->group_power *
5003 min(local->load_per_task, local->avg_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005004 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005005
5006 /* Move if we gain throughput */
5007 if (pwr_move > pwr_now)
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005008 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005009}
5010
5011/**
5012 * calculate_imbalance - Calculate the amount of imbalance present within the
5013 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005014 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005015 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005016 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005017static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005018{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005019 unsigned long max_pull, load_above_capacity = ~0UL;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005020 struct sg_lb_stats *local, *busiest;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005021
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005022 local = &sds->local_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005023 busiest = &sds->busiest_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005024
5025 if (busiest->group_imb) {
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005026 /*
5027 * In the group_imb case we cannot rely on group-wide averages
5028 * to ensure cpu-load equilibrium, look at wider averages. XXX
5029 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005030 busiest->load_per_task =
5031 min(busiest->load_per_task, sds->avg_load);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005032 }
5033
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005034 /*
5035 * In the presence of smp nice balancing, certain scenarios can have
5036 * max load less than avg load (as we skip the groups at or below
5037 * its cpu_power, while calculating max_load).
5038 */
Vladimir Davydovb1885552013-09-15 17:49:13 +04005039 if (busiest->avg_load <= sds->avg_load ||
5040 local->avg_load >= sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005041 env->imbalance = 0;
5042 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005043 }
5044
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005045 if (!busiest->group_imb) {
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005046 /*
5047 * Don't want to pull so many tasks that a group would go idle.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005048 * Except of course for the group_imb case, since then we might
5049 * have to drop below capacity to reach cpu-load equilibrium.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005050 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005051 load_above_capacity =
5052 (busiest->sum_nr_running - busiest->group_capacity);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005053
Nikhil Rao1399fa72011-05-18 10:09:39 -07005054 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005055 load_above_capacity /= busiest->group_power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005056 }
5057
5058 /*
5059 * We're trying to get all the cpus to the average_load, so we don't
5060 * want to push ourselves above the average load, nor do we wish to
5061 * reduce the max loaded cpu below the average load. At the same time,
5062 * we also don't want to reduce the group load below the group capacity
5063 * (so that we can implement power-savings policies etc). Thus we look
5064 * for the minimum possible imbalance.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005065 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005066 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005067
5068 /* How much load to actually move to equalise the imbalance */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005069 env->imbalance = min(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005070 max_pull * busiest->group_power,
5071 (sds->avg_load - local->avg_load) * local->group_power
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005072 ) / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005073
5074 /*
5075 * If env->imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005076 * there is no guarantee that any tasks will be moved, so we consider
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005077 * bumping its value to force at least one task to be
5078 * moved.
5079 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005080 if (env->imbalance < busiest->load_per_task)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005081 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005082}
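/*
 * E.g. (illustrative numbers, group_power = 1024 for both groups): with
 * busiest->avg_load = 1536, local->avg_load = 512, sds->avg_load = 1024 and
 * one task above capacity (load_above_capacity = 1024), we get
 * max_pull = min(1536 - 1024, 1024) = 512 and
 * env->imbalance = min(512 * 1024, (1024 - 512) * 1024) / 1024 = 512.
 */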
Nikhil Raofab47622010-10-15 13:12:29 -07005083
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005084/******* find_busiest_group() helpers end here *********************/
5085
5086/**
5087 * find_busiest_group - Returns the busiest group within the sched_domain
5088 * if there is an imbalance. If there isn't an imbalance, and
5089 * the user has opted for power-savings, it returns a group whose
5090 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
5091 * such a group exists.
5092 *
5093 * Also calculates the amount of weighted load which should be moved
5094 * to restore balance.
5095 *
Randy Dunlapcd968912012-06-08 13:18:33 -07005096 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005097 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02005098 * Return: - The busiest group if imbalance exists.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005099 * - If no imbalance and user has opted for power-savings balance,
5100 * return the least loaded group whose CPUs can be
5101 * put to idle by rebalancing its tasks onto our group.
5102 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005103static struct sched_group *find_busiest_group(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005104{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005105 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005106 struct sd_lb_stats sds;
5107
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005108 init_sd_lb_stats(&sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005109
5110 /*
5111 * Compute the various statistics relevant for load balancing at
5112 * this level.
5113 */
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005114 update_sd_lb_stats(env, &sds);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005115 local = &sds.local_stat;
5116 busiest = &sds.busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005117
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005118 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
5119 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005120 return sds.busiest;
5121
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005122 /* There is no busy sibling group to pull tasks from */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005123 if (!sds.busiest || busiest->sum_nr_running == 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005124 goto out_balanced;
5125
Nikhil Rao1399fa72011-05-18 10:09:39 -07005126 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07005127
Peter Zijlstra866ab432011-02-21 18:56:47 +01005128 /*
5129 * If the busiest group is imbalanced the below checks don't
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005130 * work because they assume all things are equal, which typically
Peter Zijlstra866ab432011-02-21 18:56:47 +01005131 * isn't true due to cpus_allowed constraints and the like.
5132 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005133 if (busiest->group_imb)
Peter Zijlstra866ab432011-02-21 18:56:47 +01005134 goto force_balance;
5135
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005136 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005137 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
5138 !busiest->group_has_capacity)
Nikhil Raofab47622010-10-15 13:12:29 -07005139 goto force_balance;
5140
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005141 /*
5142 * If the local group is more busy than the selected busiest group
5143 * don't try and pull any tasks.
5144 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005145 if (local->avg_load >= busiest->avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005146 goto out_balanced;
5147
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005148 /*
5149 * Don't pull any tasks if this group is already above the domain
5150 * average load.
5151 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005152 if (local->avg_load >= sds.avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005153 goto out_balanced;
5154
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005155 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005156 /*
5157 * This cpu is idle. If the busiest group doesn't have
5158 * more tasks than the number of available cpus and there
5159 * is no imbalance between this and the busiest group
5160 * w.r.t. idle cpus, it is balanced.
5161 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005162 if ((local->idle_cpus < busiest->idle_cpus) &&
5163 busiest->sum_nr_running <= busiest->group_weight)
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005164 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005165 } else {
5166 /*
5167 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
5168 * imbalance_pct to be conservative.
5169 */
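/*
 * For example, assuming the common default imbalance_pct of 125:
 * with local->avg_load = 1000, any busiest->avg_load up to 1250
 * still counts as balanced; only above that do we go on to
 * compute an imbalance.
 */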
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005170 if (100 * busiest->avg_load <=
5171 env->sd->imbalance_pct * local->avg_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005172 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005173 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005174
Nikhil Raofab47622010-10-15 13:12:29 -07005175force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005176 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005177 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005178 return sds.busiest;
5179
5180out_balanced:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005181 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005182 return NULL;
5183}
5184
5185/*
5186 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5187 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005188static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb9403132012-07-12 16:10:13 +08005189 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005190{
5191 struct rq *busiest = NULL, *rq;
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005192 unsigned long busiest_load = 0, busiest_power = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005193 int i;
5194
Peter Zijlstra6906a402013-08-19 15:20:21 +02005195 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005196 unsigned long power = power_of(i);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005197 unsigned long capacity = DIV_ROUND_CLOSEST(power,
5198 SCHED_POWER_SCALE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005199 unsigned long wl;
5200
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005201 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005202 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005203
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005204 rq = cpu_rq(i);
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005205 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005206
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005207 /*
5208 * When comparing with imbalance, use weighted_cpuload()
5209 * which is not scaled with the cpu power.
5210 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005211 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005212 continue;
5213
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005214 /*
5215 * For the load comparisons with the other cpu's, consider
5216 * the weighted_cpuload() scaled with the cpu power, so that
5217 * the load can be moved away from the cpu that is potentially
5218 * running at a lower capacity.
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005219 *
5220 * Thus we're looking for max(wl_i / power_i), crosswise
5221 * multiplication to rid ourselves of the division works out
5222 * to: wl_i * power_j > wl_j * power_i; where j is our
5223 * previous maximum.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005224 */
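/*
 * A made-up example: a cpu at half power (power = 512 with
 * SCHED_POWER_SCALE = 1024) carrying wl = 600 wins over a
 * full-power cpu carrying wl = 1000, because
 * 600 * 1024 > 1000 * 512.
 */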
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005225 if (wl * busiest_power > busiest_load * power) {
5226 busiest_load = wl;
5227 busiest_power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005228 busiest = rq;
5229 }
5230 }
5231
5232 return busiest;
5233}
5234
5235/*
5236 * Max backoff if we encounter pinned tasks. The exact value is pretty
5237 * arbitrary, so long as it is large enough.
5238 */
5239#define MAX_PINNED_INTERVAL 512
5240
5241/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09005242DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005243
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005244static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005245{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005246 struct sched_domain *sd = env->sd;
5247
5248 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005249
5250 /*
5251 * ASYM_PACKING needs to force migrate tasks from busy but
5252 * higher numbered CPUs in order to pack all tasks in the
5253 * lowest numbered CPUs.
5254 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005255 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005256 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005257 }
5258
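/*
 * Otherwise fall back to active balancing only after repeated failures;
 * e.g. with cache_nice_tries == 1 (a typical value for a multi-core
 * domain, assumption) the fourth consecutive failed attempt is the
 * first one to return true here.
 */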
5259 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5260}
5261
Tejun Heo969c7922010-05-06 18:49:21 +02005262static int active_load_balance_cpu_stop(void *data);
5263
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005264static int should_we_balance(struct lb_env *env)
5265{
5266 struct sched_group *sg = env->sd->groups;
5267 struct cpumask *sg_cpus, *sg_mask;
5268 int cpu, balance_cpu = -1;
5269
5270 /*
5271 * In the newly idle case, we will allow all the cpus
5272 * to do the newly idle load balance.
5273 */
5274 if (env->idle == CPU_NEWLY_IDLE)
5275 return 1;
5276
5277 sg_cpus = sched_group_cpus(sg);
5278 sg_mask = sched_group_mask(sg);
5279 /* Try to find first idle cpu */
5280 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
5281 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
5282 continue;
5283
5284 balance_cpu = cpu;
5285 break;
5286 }
5287
5288 if (balance_cpu == -1)
5289 balance_cpu = group_balance_cpu(sg);
5290
5291 /*
5292 * The first idle cpu, or the first cpu (busiest) in this sched group,
5293 * is eligible for doing load balancing at this and above domains.
5294 */
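/*
 * E.g. in a group spanning cpus 4-7 where cpus 5 and 6 are idle,
 * only cpu 5 (the first idle cpu) gets to balance; cpus 4, 6 and 7
 * return 0 and skip this domain.
 */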
Joonsoo Kimb0cff9d2013-09-10 15:54:49 +09005295 return balance_cpu == env->dst_cpu;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005296}
5297
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005298/*
5299 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5300 * tasks if there is an imbalance.
5301 */
5302static int load_balance(int this_cpu, struct rq *this_rq,
5303 struct sched_domain *sd, enum cpu_idle_type idle,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005304 int *continue_balancing)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005305{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305306 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra62633222013-08-19 12:41:09 +02005307 struct sched_domain *sd_parent = sd->parent;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005308 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005309 struct rq *busiest;
5310 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09005311 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005312
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005313 struct lb_env env = {
5314 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005315 .dst_cpu = this_cpu,
5316 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305317 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005318 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02005319 .loop_break = sched_nr_migrate_break,
Michael Wangb9403132012-07-12 16:10:13 +08005320 .cpus = cpus,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005321 };
5322
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005323 /*
5324 * For NEWLY_IDLE load_balancing, we don't need to consider
5325 * other cpus in our group
5326 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005327 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005328 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005329
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005330 cpumask_copy(cpus, cpu_active_mask);
5331
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005332 schedstat_inc(sd, lb_count[idle]);
5333
5334redo:
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005335 if (!should_we_balance(&env)) {
5336 *continue_balancing = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005337 goto out_balanced;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005338 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005339
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005340 group = find_busiest_group(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005341 if (!group) {
5342 schedstat_inc(sd, lb_nobusyg[idle]);
5343 goto out_balanced;
5344 }
5345
Michael Wangb9403132012-07-12 16:10:13 +08005346 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005347 if (!busiest) {
5348 schedstat_inc(sd, lb_nobusyq[idle]);
5349 goto out_balanced;
5350 }
5351
Michael Wang78feefc2012-08-06 16:41:59 +08005352 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005353
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005354 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005355
5356 ld_moved = 0;
5357 if (busiest->nr_running > 1) {
5358 /*
5359 * Attempt to move tasks. If find_busiest_group has found
5360 * an imbalance but busiest->nr_running <= 1, the group is
5361 * still unbalanced. ld_moved simply stays zero, so it is
5362 * correctly treated as an imbalance.
5363 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005364 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02005365 env.src_cpu = busiest->cpu;
5366 env.src_rq = busiest;
5367 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005368
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005369more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005370 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08005371 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305372
5373 /*
5374 * cur_ld_moved - load moved in current iteration
5375 * ld_moved - cumulative load moved across iterations
5376 */
5377 cur_ld_moved = move_tasks(&env);
5378 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08005379 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005380 local_irq_restore(flags);
5381
5382 /*
5383 * some other cpu did the load balance for us.
5384 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305385 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5386 resched_cpu(env.dst_cpu);
5387
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09005388 if (env.flags & LBF_NEED_BREAK) {
5389 env.flags &= ~LBF_NEED_BREAK;
5390 goto more_balance;
5391 }
5392
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305393 /*
5394 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5395 * us and move them to an alternate dst_cpu in our sched_group
5396 * where they can run. The upper limit on how many times we
5397 * iterate on the same src_cpu depends on the number of cpus in our
5398 * sched_group.
5399 *
5400 * This changes load balance semantics a bit on who can move
5401 * load to a given_cpu. In addition to the given_cpu itself
5402 * (or an ilb_cpu acting on its behalf where given_cpu is
5403 * nohz-idle), we now have balance_cpu in a position to move
5404 * load to given_cpu. In rare situations, this may cause
5405 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5406 * _independently_ and at the _same_ time to move some load to
5407 * given_cpu) causing excess load to be moved to given_cpu.
5408 * This, however, should not happen often in practice, and
5409 * moreover subsequent load balance cycles should correct the
5410 * excess load moved.
5411 */
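/*
 * Concretely: if some tasks on src_cpu could not come to our dst_cpu
 * because of their cpumask, but another cpu in our group is allowed
 * (LBF_DST_PINNED / new_dst_cpu), retarget env.dst_cpu and env.dst_rq
 * to that cpu and retry against the same src_cpu.
 */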
Peter Zijlstra62633222013-08-19 12:41:09 +02005412 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305413
Vladimir Davydov7aff2e32013-09-15 21:30:13 +04005414 /* Prevent re-selecting dst_cpu via env's cpus */
5415 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5416
Michael Wang78feefc2012-08-06 16:41:59 +08005417 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305418 env.dst_cpu = env.new_dst_cpu;
Peter Zijlstra62633222013-08-19 12:41:09 +02005419 env.flags &= ~LBF_DST_PINNED;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305420 env.loop = 0;
5421 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005422
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305423 /*
5424 * Go back to "more_balance" rather than "redo" since we
5425 * need to continue with same src_cpu.
5426 */
5427 goto more_balance;
5428 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005429
Peter Zijlstra62633222013-08-19 12:41:09 +02005430 /*
5431 * We failed to reach balance because of affinity.
5432 */
5433 if (sd_parent) {
5434 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
5435
5436 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
5437 *group_imbalance = 1;
5438 } else if (*group_imbalance)
5439 *group_imbalance = 0;
5440 }
5441
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005442 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005443 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005444 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305445 if (!cpumask_empty(cpus)) {
5446 env.loop = 0;
5447 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005448 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305449 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005450 goto out_balanced;
5451 }
5452 }
5453
5454 if (!ld_moved) {
5455 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07005456 /*
5457 * Increment the failure counter only on periodic balance.
5458 * We do not want newidle balance, which can be very
5459 * frequent, to pollute the failure counter, causing
5460 * excessive cache_hot migrations and active balances.
5461 */
5462 if (idle != CPU_NEWLY_IDLE)
5463 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005464
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005465 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005466 raw_spin_lock_irqsave(&busiest->lock, flags);
5467
Tejun Heo969c7922010-05-06 18:49:21 +02005468 /* don't kick the active_load_balance_cpu_stop
5469 * if the curr task on busiest cpu can't be
5470 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005471 */
5472 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02005473 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005474 raw_spin_unlock_irqrestore(&busiest->lock,
5475 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005476 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005477 goto out_one_pinned;
5478 }
5479
Tejun Heo969c7922010-05-06 18:49:21 +02005480 /*
5481 * ->active_balance synchronizes accesses to
5482 * ->active_balance_work. Once set, it's cleared
5483 * only after active load balance is finished.
5484 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005485 if (!busiest->active_balance) {
5486 busiest->active_balance = 1;
5487 busiest->push_cpu = this_cpu;
5488 active_balance = 1;
5489 }
5490 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005491
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005492 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02005493 stop_one_cpu_nowait(cpu_of(busiest),
5494 active_load_balance_cpu_stop, busiest,
5495 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005496 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005497
5498 /*
5499 * We've kicked active balancing, reset the failure
5500 * counter.
5501 */
5502 sd->nr_balance_failed = sd->cache_nice_tries+1;
5503 }
5504 } else
5505 sd->nr_balance_failed = 0;
5506
5507 if (likely(!active_balance)) {
5508 /* We were unbalanced, so reset the balancing interval */
5509 sd->balance_interval = sd->min_interval;
5510 } else {
5511 /*
5512 * If we've begun active balancing, start to back off. This
5513 * case may not be covered by the all_pinned logic if there
5514 * is only 1 task on the busy runqueue (because we don't call
5515 * move_tasks).
5516 */
5517 if (sd->balance_interval < sd->max_interval)
5518 sd->balance_interval *= 2;
5519 }
5520
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005521 goto out;
5522
5523out_balanced:
5524 schedstat_inc(sd, lb_balanced[idle]);
5525
5526 sd->nr_balance_failed = 0;
5527
5528out_one_pinned:
5529 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005530 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02005531 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005532 (sd->balance_interval < sd->max_interval))
5533 sd->balance_interval *= 2;
5534
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08005535 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005536out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005537 return ld_moved;
5538}
5539
5540/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005541 * idle_balance is called by schedule() if this_cpu is about to become
5542 * idle. Attempts to pull tasks from other CPUs.
5543 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005544void idle_balance(int this_cpu, struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005545{
5546 struct sched_domain *sd;
5547 int pulled_task = 0;
5548 unsigned long next_balance = jiffies + HZ;
Jason Low9bd721c2013-09-13 11:26:52 -07005549 u64 curr_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005550
Frederic Weisbecker78becc22013-04-12 01:51:02 +02005551 this_rq->idle_stamp = rq_clock(this_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005552
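/*
 * If this cpu's recent average idle period is shorter than the
 * migration cost (0.5 ms with the stock sysctl_sched_migration_cost
 * default, assumption), a newidle balance would likely cost more than
 * the idle time it could fill, so bail out right away.
 */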
5553 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5554 return;
5555
Peter Zijlstraf492e122009-12-23 15:29:42 +01005556 /*
5557 * Drop the rq->lock, but keep IRQ/preempt disabled.
5558 */
5559 raw_spin_unlock(&this_rq->lock);
5560
Paul Turner48a16752012-10-04 13:18:31 +02005561 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005562 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005563 for_each_domain(this_cpu, sd) {
5564 unsigned long interval;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005565 int continue_balancing = 1;
Jason Low9bd721c2013-09-13 11:26:52 -07005566 u64 t0, domain_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005567
5568 if (!(sd->flags & SD_LOAD_BALANCE))
5569 continue;
5570
Jason Low9bd721c2013-09-13 11:26:52 -07005571 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
5572 break;
5573
Peter Zijlstraf492e122009-12-23 15:29:42 +01005574 if (sd->flags & SD_BALANCE_NEWIDLE) {
Jason Low9bd721c2013-09-13 11:26:52 -07005575 t0 = sched_clock_cpu(this_cpu);
5576
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005577 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01005578 pulled_task = load_balance(this_cpu, this_rq,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005579 sd, CPU_NEWLY_IDLE,
5580 &continue_balancing);
Jason Low9bd721c2013-09-13 11:26:52 -07005581
5582 domain_cost = sched_clock_cpu(this_cpu) - t0;
5583 if (domain_cost > sd->max_newidle_lb_cost)
5584 sd->max_newidle_lb_cost = domain_cost;
5585
5586 curr_cost += domain_cost;
Peter Zijlstraf492e122009-12-23 15:29:42 +01005587 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005588
5589 interval = msecs_to_jiffies(sd->balance_interval);
5590 if (time_after(next_balance, sd->last_balance + interval))
5591 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005592 if (pulled_task) {
5593 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005594 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005595 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005596 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005597 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01005598
5599 raw_spin_lock(&this_rq->lock);
5600
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005601 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5602 /*
5603 * We are going idle. next_balance may be set based on
5604 * a busy processor. So reset next_balance.
5605 */
5606 this_rq->next_balance = next_balance;
5607 }
Jason Low9bd721c2013-09-13 11:26:52 -07005608
5609 if (curr_cost > this_rq->max_idle_balance_cost)
5610 this_rq->max_idle_balance_cost = curr_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005611}
5612
5613/*
Tejun Heo969c7922010-05-06 18:49:21 +02005614 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5615 * running tasks off the busiest CPU onto idle CPUs. It requires at
5616 * least 1 task to be running on each physical CPU where possible, and
5617 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005618 */
Tejun Heo969c7922010-05-06 18:49:21 +02005619static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005620{
Tejun Heo969c7922010-05-06 18:49:21 +02005621 struct rq *busiest_rq = data;
5622 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005623 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02005624 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005625 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02005626
5627 raw_spin_lock_irq(&busiest_rq->lock);
5628
5629 /* make sure the requested cpu hasn't gone down in the meantime */
5630 if (unlikely(busiest_cpu != smp_processor_id() ||
5631 !busiest_rq->active_balance))
5632 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005633
5634 /* Is there any task to move? */
5635 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02005636 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005637
5638 /*
5639 * This condition is "impossible"; if it occurs,
5640 * we need to fix it. Originally reported by
5641 * Bjorn Helgaas on a 128-cpu setup.
5642 */
5643 BUG_ON(busiest_rq == target_rq);
5644
5645 /* move a task from busiest_rq to target_rq */
5646 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005647
5648 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02005649 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005650 for_each_domain(target_cpu, sd) {
5651 if ((sd->flags & SD_LOAD_BALANCE) &&
5652 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5653 break;
5654 }
5655
5656 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005657 struct lb_env env = {
5658 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005659 .dst_cpu = target_cpu,
5660 .dst_rq = target_rq,
5661 .src_cpu = busiest_rq->cpu,
5662 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005663 .idle = CPU_IDLE,
5664 };
5665
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005666 schedstat_inc(sd, alb_count);
5667
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005668 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005669 schedstat_inc(sd, alb_pushed);
5670 else
5671 schedstat_inc(sd, alb_failed);
5672 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005673 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005674 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02005675out_unlock:
5676 busiest_rq->active_balance = 0;
5677 raw_spin_unlock_irq(&busiest_rq->lock);
5678 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005679}
5680
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005681#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005682/*
5683 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005684 * - When one of the busy CPUs notices that idle rebalancing may be
5685 * needed, it will kick the idle load balancer, which then does idle
5686 * load balancing for all the idle CPUs.
5687 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005688static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005689 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005690 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005691 unsigned long next_balance; /* in jiffy units */
5692} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005693
Peter Zijlstra8e7fbcb2012-01-09 11:28:35 +01005694static inline int find_new_ilb(int call_cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005695{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005696 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005697
Suresh Siddha786d6dc2011-12-01 17:07:35 -08005698 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5699 return ilb;
5700
5701 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005702}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005703
5704/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005705 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
5706 * nohz_load_balancer CPU (if there is one), otherwise we fall back to any
5707 * idle CPU (if there is one).
5708 */
5709static void nohz_balancer_kick(int cpu)
5710{
5711 int ilb_cpu;
5712
5713 nohz.next_balance++;
5714
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005715 ilb_cpu = find_new_ilb(cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005716
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005717 if (ilb_cpu >= nr_cpu_ids)
5718 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005719
Suresh Siddhacd490c52011-12-06 11:26:34 -08005720 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08005721 return;
5722 /*
5723 * Use smp_send_reschedule() instead of resched_cpu().
5724 * This way we generate a sched IPI on the target cpu which
5725 * is idle. And the softirq performing nohz idle load balance
5726 * will be run before returning from the IPI.
5727 */
5728 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005729 return;
5730}
5731
Alex Shic1cc0172012-09-10 15:10:58 +08005732static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08005733{
5734 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5735 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5736 atomic_dec(&nohz.nr_cpus);
5737 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5738 }
5739}
5740
Suresh Siddha69e1e812011-12-01 17:07:33 -08005741static inline void set_cpu_sd_state_busy(void)
5742{
5743 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005744
Suresh Siddha69e1e812011-12-01 17:07:33 -08005745 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005746 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005747
5748 if (!sd || !sd->nohz_idle)
5749 goto unlock;
5750 sd->nohz_idle = 0;
5751
5752 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005753 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005754unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005755 rcu_read_unlock();
5756}
5757
5758void set_cpu_sd_state_idle(void)
5759{
5760 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005761
Suresh Siddha69e1e812011-12-01 17:07:33 -08005762 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005763 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005764
5765 if (!sd || sd->nohz_idle)
5766 goto unlock;
5767 sd->nohz_idle = 1;
5768
5769 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005770 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005771unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005772 rcu_read_unlock();
5773}
5774
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005775/*
Alex Shic1cc0172012-09-10 15:10:58 +08005776 * This routine records that the cpu is going idle with its tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005777 * This info will be used when performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005778 */
Alex Shic1cc0172012-09-10 15:10:58 +08005779void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005780{
Suresh Siddha71325962012-01-19 18:28:57 -08005781 /*
5782 * If this cpu is going down, then nothing needs to be done.
5783 */
5784 if (!cpu_active(cpu))
5785 return;
5786
Alex Shic1cc0172012-09-10 15:10:58 +08005787 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5788 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005789
Alex Shic1cc0172012-09-10 15:10:58 +08005790 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5791 atomic_inc(&nohz.nr_cpus);
5792 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005793}
Suresh Siddha71325962012-01-19 18:28:57 -08005794
Paul Gortmaker0db06282013-06-19 14:53:51 -04005795static int sched_ilb_notifier(struct notifier_block *nfb,
Suresh Siddha71325962012-01-19 18:28:57 -08005796 unsigned long action, void *hcpu)
5797{
5798 switch (action & ~CPU_TASKS_FROZEN) {
5799 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08005800 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08005801 return NOTIFY_OK;
5802 default:
5803 return NOTIFY_DONE;
5804 }
5805}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005806#endif
5807
5808static DEFINE_SPINLOCK(balancing);
5809
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005810/*
5811 * Scale the max load_balance interval with the number of CPUs in the system.
5812 * This trades load-balance latency on larger machines for less cross talk.
5813 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005814void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005815{
5816 max_load_balance_interval = HZ*num_online_cpus()/10;
5817}
5818
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005819/*
5820 * It checks each scheduling domain to see if it is due to be balanced,
5821 * and initiates a balancing operation if so.
5822 *
Libinb9b08532013-04-01 19:14:01 +08005823 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005824 */
5825static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5826{
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005827 int continue_balancing = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005828 struct rq *rq = cpu_rq(cpu);
5829 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005830 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005831 /* Earliest time when we have to do rebalance again */
5832 unsigned long next_balance = jiffies + 60*HZ;
5833 int update_next_balance = 0;
Jason Lowf48627e2013-09-13 11:26:53 -07005834 int need_serialize, need_decay = 0;
5835 u64 max_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005836
Paul Turner48a16752012-10-04 13:18:31 +02005837 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08005838
Peter Zijlstradce840a2011-04-07 14:09:50 +02005839 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005840 for_each_domain(cpu, sd) {
Jason Lowf48627e2013-09-13 11:26:53 -07005841 /*
5842 * Decay the newidle max times here because this is a regular
5843 * visit to all the domains. Decay ~1% per second.
5844 */
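/*
 * (253/256 ~= 0.988, so each HZ-spaced visit trims a bit over 1%:
 * e.g. a max_newidle_lb_cost of 10000 ns decays to roughly 9882 ns
 * after one second.)
 */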
5845 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
5846 sd->max_newidle_lb_cost =
5847 (sd->max_newidle_lb_cost * 253) / 256;
5848 sd->next_decay_max_lb_cost = jiffies + HZ;
5849 need_decay = 1;
5850 }
5851 max_cost += sd->max_newidle_lb_cost;
5852
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005853 if (!(sd->flags & SD_LOAD_BALANCE))
5854 continue;
5855
Jason Lowf48627e2013-09-13 11:26:53 -07005856 /*
5857 * Stop the load balance at this level. There is another
5858 * CPU in our sched group which is doing load balancing more
5859 * actively.
5860 */
5861 if (!continue_balancing) {
5862 if (need_decay)
5863 continue;
5864 break;
5865 }
5866
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005867 interval = sd->balance_interval;
5868 if (idle != CPU_IDLE)
5869 interval *= sd->busy_factor;
5870
5871 /* scale ms to jiffies */
5872 interval = msecs_to_jiffies(interval);
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005873 interval = clamp(interval, 1UL, max_load_balance_interval);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005874
5875 need_serialize = sd->flags & SD_SERIALIZE;
5876
5877 if (need_serialize) {
5878 if (!spin_trylock(&balancing))
5879 goto out;
5880 }
5881
5882 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005883 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005884 /*
Peter Zijlstra62633222013-08-19 12:41:09 +02005885 * The LBF_DST_PINNED logic could have changed
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005886 * env->dst_cpu, so we can't know our idle
5887 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005888 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005889 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005890 }
5891 sd->last_balance = jiffies;
5892 }
5893 if (need_serialize)
5894 spin_unlock(&balancing);
5895out:
5896 if (time_after(next_balance, sd->last_balance + interval)) {
5897 next_balance = sd->last_balance + interval;
5898 update_next_balance = 1;
5899 }
Jason Lowf48627e2013-09-13 11:26:53 -07005900 }
5901 if (need_decay) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005902 /*
Jason Lowf48627e2013-09-13 11:26:53 -07005903 * Ensure the rq-wide value also decays but keep it at a
5904 * reasonable floor to avoid funnies with rq->avg_idle.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005905 */
Jason Lowf48627e2013-09-13 11:26:53 -07005906 rq->max_idle_balance_cost =
5907 max((u64)sysctl_sched_migration_cost, max_cost);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005908 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005909 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005910
5911 /*
5912 * next_balance will be updated only when there is a need.
5913 * When the cpu is attached to a null domain, for example, it will not be
5914 * updated.
5915 */
5916 if (likely(update_next_balance))
5917 rq->next_balance = next_balance;
5918}
5919
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005920#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005921/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005922 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005923 * rebalancing for all the cpus whose scheduler ticks are stopped.
5924 */
5925static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5926{
5927 struct rq *this_rq = cpu_rq(this_cpu);
5928 struct rq *rq;
5929 int balance_cpu;
5930
Suresh Siddha1c792db2011-12-01 17:07:32 -08005931 if (idle != CPU_IDLE ||
5932 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5933 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005934
5935 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08005936 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005937 continue;
5938
5939 /*
5940 * If this cpu gets work to do, stop the load balancing
5941 * work being done for other cpus. Next load
5942 * balancing owner will pick it up.
5943 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08005944 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005945 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005946
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02005947 rq = cpu_rq(balance_cpu);
5948
5949 raw_spin_lock_irq(&rq->lock);
5950 update_rq_clock(rq);
5951 update_idle_cpu_load(rq);
5952 raw_spin_unlock_irq(&rq->lock);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005953
5954 rebalance_domains(balance_cpu, CPU_IDLE);
5955
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005956 if (time_after(this_rq->next_balance, rq->next_balance))
5957 this_rq->next_balance = rq->next_balance;
5958 }
5959 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08005960end:
5961 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005962}
5963
5964/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005965 * Current heuristic for kicking the idle load balancer in the presence
5966 * of an idle cpu in the system:
5967 * - This rq has more than one task.
5968 * - At any scheduler domain level, this cpu's scheduler group has multiple
5969 * busy cpus exceeding the group's power.
5970 * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
5971 * domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005972 */
5973static inline int nohz_kick_needed(struct rq *rq, int cpu)
5974{
5975 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005976 struct sched_domain *sd;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005977
Suresh Siddha1c792db2011-12-01 17:07:32 -08005978 if (unlikely(idle_cpu(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005979 return 0;
5980
Suresh Siddha1c792db2011-12-01 17:07:32 -08005981 /*
5982 * We may have recently been in ticked or tickless idle mode. At the first
5983 * busy tick after returning from idle, we will update the busy stats.
5984 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08005985 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08005986 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005987
5988 /*
5989 * None are in tickless mode and hence no need for NOHZ idle load
5990 * balancing.
5991 */
5992 if (likely(!atomic_read(&nohz.nr_cpus)))
5993 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08005994
5995 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005996 return 0;
5997
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005998 if (rq->nr_running >= 2)
5999 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006000
Peter Zijlstra067491b2011-12-07 14:32:08 +01006001 rcu_read_lock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006002 for_each_domain(cpu, sd) {
6003 struct sched_group *sg = sd->groups;
6004 struct sched_group_power *sgp = sg->sgp;
6005 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006006
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006007 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01006008 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006009
6010 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
6011 && (cpumask_first_and(nohz.idle_cpus_mask,
6012 sched_domain_span(sd)) < cpu))
Peter Zijlstra067491b2011-12-07 14:32:08 +01006013 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006014
6015 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
6016 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006017 }
Peter Zijlstra067491b2011-12-07 14:32:08 +01006018 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006019 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01006020
6021need_kick_unlock:
6022 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006023need_kick:
6024 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006025}
6026#else
6027static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6028#endif
6029
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006030/*
6031 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006032 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006033 */
6034static void run_rebalance_domains(struct softirq_action *h)
6035{
6036 int this_cpu = smp_processor_id();
6037 struct rq *this_rq = cpu_rq(this_cpu);
Suresh Siddha6eb57e02011-10-03 15:09:01 -07006038 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006039 CPU_IDLE : CPU_NOT_IDLE;
6040
6041 rebalance_domains(this_cpu, idle);
6042
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006043 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006044 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006045 * balancing on behalf of the other idle cpus whose ticks are
6046 * stopped.
6047 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006048 nohz_idle_balance(this_cpu, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006049}
6050
6051static inline int on_null_domain(int cpu)
6052{
Paul E. McKenney90a65012010-02-28 08:32:18 -08006053 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006054}
6055
6056/*
6057 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006058 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006059void trigger_load_balance(struct rq *rq, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006060{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006061 /* Don't need to rebalance while attached to NULL domain */
6062 if (time_after_eq(jiffies, rq->next_balance) &&
6063 likely(!on_null_domain(cpu)))
6064 raise_softirq(SCHED_SOFTIRQ);
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006065#ifdef CONFIG_NO_HZ_COMMON
Suresh Siddha1c792db2011-12-01 17:07:32 -08006066 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006067 nohz_balancer_kick(cpu);
6068#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006069}
6070
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006071static void rq_online_fair(struct rq *rq)
6072{
6073 update_sysctl();
6074}
6075
6076static void rq_offline_fair(struct rq *rq)
6077{
6078 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07006079
6080 /* Ensure any throttled groups are reachable by pick_next_task */
6081 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006082}
6083
Dhaval Giani55e12e52008-06-24 23:39:43 +05306084#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02006085
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006086/*
6087 * scheduler tick hitting a task of our scheduling class:
6088 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006089static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006090{
6091 struct cfs_rq *cfs_rq;
6092 struct sched_entity *se = &curr->se;
6093
6094 for_each_sched_entity(se) {
6095 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006096 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006097 }
Ben Segall18bf2802012-10-04 12:51:20 +02006098
Dave Kleikamp10e84b92013-07-31 13:53:35 -07006099 if (numabalancing_enabled)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02006100 task_tick_numa(rq, curr);
Linus Torvalds3d59eeb2012-12-16 14:33:25 -08006101
Ben Segall18bf2802012-10-04 12:51:20 +02006102 update_rq_runnable_avg(rq, 1);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006103}
6104
6105/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006106 * called on fork with the child task as argument from the parent's context
6107 * - child not yet on the tasklist
6108 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006109 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006110static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006111{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09006112 struct cfs_rq *cfs_rq;
6113 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02006114 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006115 struct rq *rq = this_rq();
6116 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006117
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006118 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006119
Peter Zijlstra861d0342010-08-19 13:31:43 +02006120 update_rq_clock(rq);
6121
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09006122 cfs_rq = task_cfs_rq(current);
6123 curr = cfs_rq->curr;
6124
Daisuke Nishimura6c9a27f2013-09-10 18:16:36 +09006125 /*
6126 * Not only the cpu but also the task_group of the parent might have
6127 * been changed after parent->se.parent,cfs_rq were copied to
6128 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
6129 * of child point to valid ones.
6130 */
6131 rcu_read_lock();
6132 __set_task_cpu(p, this_cpu);
6133 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006134
Ting Yang7109c442007-08-28 12:53:24 +02006135 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006136
Mike Galbraithb5d9d732009-09-08 11:12:28 +02006137 if (curr)
6138 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02006139 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006140
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006141 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02006142 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02006143 * Upon rescheduling, sched_class::put_prev_task() will place
6144 * 'current' within the tree based on its new key value.
6145 */
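/*
 * entity_before(curr, se) told us the parent currently has the
 * smaller vruntime; swapping hands that smaller value to the child
 * so it becomes the leftmost of the two and runs first, which is
 * what sched_child_runs_first asks for.
 */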
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006146 swap(curr->vruntime, se->vruntime);
Bharata B Raoaec0a512008-08-28 14:42:49 +05306147 resched_task(rq->curr);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006148 }
6149
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006150 se->vruntime -= cfs_rq->min_vruntime;
6151
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006152 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006153}
6154
Steven Rostedtcb469842008-01-25 21:08:22 +01006155/*
6156 * Priority of the task has changed. Check to see if we preempt
6157 * the current task.
6158 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006159static void
6160prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01006161{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006162 if (!p->se.on_rq)
6163 return;
6164
Steven Rostedtcb469842008-01-25 21:08:22 +01006165 /*
6166 * Reschedule if we are currently running on this runqueue and
6167 * our priority decreased, or if we are not currently running on
6168 * this runqueue and our priority is higher than the current's
6169 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006170 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01006171 if (p->prio > oldprio)
6172 resched_task(rq->curr);
6173 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02006174 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01006175}
6176
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006177static void switched_from_fair(struct rq *rq, struct task_struct *p)
6178{
6179 struct sched_entity *se = &p->se;
6180 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6181
6182 /*
6183 * Ensure the task's vruntime is normalized, so that when it's
6184 * switched back to the fair class the enqueue_entity(.flags=0) will
6185 * do the right thing.
6186 *
6187 * If it was on_rq, then the dequeue_entity(.flags=0) will already
6188 * have normalized the vruntime, if it was !on_rq, then only when
6189 * the task is sleeping will it still have non-normalized vruntime.
6190 */
6191 if (!se->on_rq && p->state != TASK_RUNNING) {
6192 /*
6193 * Fix up our vruntime so that the current sleep doesn't
6194 * cause 'unlimited' sleep bonus.
6195 */
6196 place_entity(cfs_rq, se, 0);
6197 se->vruntime -= cfs_rq->min_vruntime;
6198 }
Paul Turner9ee474f2012-10-04 13:18:30 +02006199
Alex Shi141965c2013-06-26 13:05:39 +08006200#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02006201 /*
6202 * Remove our load from contribution when we leave sched_fair
6203 * and ensure we don't carry in an old decay_count if we
6204 * switch back.
6205 */
Kirill Tkhai87e3c8a2013-07-21 04:32:07 +04006206 if (se->avg.decay_count) {
6207 __synchronize_entity_decay(se);
6208 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turner9ee474f2012-10-04 13:18:30 +02006209 }
6210#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006211}
6212
Steven Rostedtcb469842008-01-25 21:08:22 +01006213/*
6214 * We switched to the sched_fair class.
6215 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006216static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01006217{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006218 if (!p->se.on_rq)
6219 return;
6220
Steven Rostedtcb469842008-01-25 21:08:22 +01006221 /*
6222 * We were most likely switched from sched_rt, so
6223 * kick off the schedule if running, otherwise just see
6224 * if we can still preempt the current task.
6225 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006226 if (rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01006227 resched_task(rq->curr);
6228 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02006229 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01006230}
6231
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006232/* Account for a task changing its policy or group.
6233 *
6234 * This routine is mostly called to set cfs_rq->curr field when a task
6235 * migrates between groups/classes.
6236 */
6237static void set_curr_task_fair(struct rq *rq)
6238{
6239 struct sched_entity *se = &rq->curr->se;
6240
Paul Turnerec12cb72011-07-21 09:43:30 -07006241 for_each_sched_entity(se) {
6242 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6243
6244 set_next_entity(cfs_rq, se);
6245 /* ensure bandwidth has been allocated on our new cfs_rq */
6246 account_cfs_rq_runtime(cfs_rq, 0);
6247 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006248}
6249
Peter Zijlstra029632f2011-10-25 10:00:11 +02006250void init_cfs_rq(struct cfs_rq *cfs_rq)
6251{
6252 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006253 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
6254#ifndef CONFIG_64BIT
6255 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
6256#endif
Alex Shi141965c2013-06-26 13:05:39 +08006257#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02006258 atomic64_set(&cfs_rq->decay_counter, 1);
Alex Shi25099402013-06-20 10:18:55 +08006259 atomic_long_set(&cfs_rq->removed_load, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02006260#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02006261}
6262
Peter Zijlstra810b3812008-02-29 15:21:01 -05006263#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006264static void task_move_group_fair(struct task_struct *p, int on_rq)
Peter Zijlstra810b3812008-02-29 15:21:01 -05006265{
Paul Turneraff3e492012-10-04 13:18:30 +02006266 struct cfs_rq *cfs_rq;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006267 /*
6268 * If the task was not on the rq at the time of this cgroup movement
6269 * it must have been asleep; sleeping tasks keep their ->vruntime
6270 * absolute on their old rq until wakeup (needed for the fair sleeper
6271 * bonus in place_entity()).
6272 *
6273 * If it was on the rq, we've just 'preempted' it, which does convert
6274 * ->vruntime to a relative base.
6275 *
6276 * Make sure both cases convert their relative position when migrating
6277 * to another cgroup's rq. This does somewhat interfere with the
6278 * fair sleeper stuff for the first placement, but who cares.
6279 */
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09006280 /*
6281 * When !on_rq, vruntime of the task has usually NOT been normalized.
6282 * But there are some cases where it has already been normalized:
6283 *
6284 * - Moving a forked child which is waiting for being woken up by
6285 * wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09006286 * - Moving a task which has been woken up by try_to_wake_up() and
6287 * is waiting to actually be woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09006288 *
6289 * To prevent boost or penalty in the new cfs_rq caused by delta
6290 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
6291 */
Daisuke Nishimura62af3782011-12-15 14:37:41 +09006292 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09006293 on_rq = 1;
6294
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006295 if (!on_rq)
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006296 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
6297 set_task_rq(p, task_cpu(p));
Paul Turneraff3e492012-10-04 13:18:30 +02006298 if (!on_rq) {
6299 cfs_rq = cfs_rq_of(&p->se);
6300 p->se.vruntime += cfs_rq->min_vruntime;
6301#ifdef CONFIG_SMP
6302 /*
6303 * migrate_task_rq_fair() will have removed our previous
6304 * contribution, but we must synchronize for ongoing future
6305 * decay.
6306 */
6307 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
6308 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
6309#endif
6310 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05006311}
Peter Zijlstra029632f2011-10-25 10:00:11 +02006312
6313void free_fair_sched_group(struct task_group *tg)
6314{
6315 int i;
6316
6317 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6318
6319 for_each_possible_cpu(i) {
6320 if (tg->cfs_rq)
6321 kfree(tg->cfs_rq[i]);
6322 if (tg->se)
6323 kfree(tg->se[i]);
6324 }
6325
6326 kfree(tg->cfs_rq);
6327 kfree(tg->se);
6328}
6329
6330int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6331{
6332 struct cfs_rq *cfs_rq;
6333 struct sched_entity *se;
6334 int i;
6335
6336 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6337 if (!tg->cfs_rq)
6338 goto err;
6339 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6340 if (!tg->se)
6341 goto err;
6342
6343 tg->shares = NICE_0_LOAD;
6344
6345 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6346
6347 for_each_possible_cpu(i) {
6348 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6349 GFP_KERNEL, cpu_to_node(i));
6350 if (!cfs_rq)
6351 goto err;
6352
6353 se = kzalloc_node(sizeof(struct sched_entity),
6354 GFP_KERNEL, cpu_to_node(i));
6355 if (!se)
6356 goto err_free_rq;
6357
6358 init_cfs_rq(cfs_rq);
6359 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6360 }
6361
6362 return 1;
6363
6364err_free_rq:
6365 kfree(cfs_rq);
6366err:
6367 return 0;
6368}
6369
6370void unregister_fair_sched_group(struct task_group *tg, int cpu)
6371{
6372 struct rq *rq = cpu_rq(cpu);
6373 unsigned long flags;
6374
6375 /*
6376 * Only empty task groups can be destroyed; so we can speculatively
6377 * check on_list without danger of it being re-added.
6378 */
6379 if (!tg->cfs_rq[cpu]->on_list)
6380 return;
6381
6382 raw_spin_lock_irqsave(&rq->lock, flags);
6383 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6384 raw_spin_unlock_irqrestore(&rq->lock, flags);
6385}
6386
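/*
 * Wire up one CPU's cfs_rq and sched_entity for a task group: attach the
 * cfs_rq to its rq and group, make the entity queue onto the parent group's
 * runqueue (or the root cfs_rq when there is no parent), and start it with
 * zero weight until shares are propagated to it.
 */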
6387void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6388 struct sched_entity *se, int cpu,
6389 struct sched_entity *parent)
6390{
6391 struct rq *rq = cpu_rq(cpu);
6392
6393 cfs_rq->tg = tg;
6394 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006395 init_cfs_rq_runtime(cfs_rq);
6396
6397 tg->cfs_rq[cpu] = cfs_rq;
6398 tg->se[cpu] = se;
6399
6400 /* se could be NULL for root_task_group */
6401 if (!se)
6402 return;
6403
6404 if (!parent)
6405 se->cfs_rq = &rq->cfs;
6406 else
6407 se->cfs_rq = parent->my_q;
6408
6409 se->my_q = cfs_rq;
6410 update_load_set(&se->load, 0);
6411 se->parent = parent;
6412}
6413
6414static DEFINE_MUTEX(shares_mutex);
6415
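/*
 * Set a task group's shares, clamped to [MIN_SHARES, MAX_SHARES], and
 * propagate the new weight up every CPU's entity hierarchy under the rq
 * lock. Serialized by shares_mutex; the root group's weight is fixed.
 */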
6416int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6417{
6418 int i;
6419 unsigned long flags;
6420
6421 /*
6422 * We can't change the weight of the root cgroup.
6423 */
6424 if (!tg->se[0])
6425 return -EINVAL;
6426
6427 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6428
6429 mutex_lock(&shares_mutex);
6430 if (tg->shares == shares)
6431 goto done;
6432
6433 tg->shares = shares;
6434 for_each_possible_cpu(i) {
6435 struct rq *rq = cpu_rq(i);
6436 struct sched_entity *se;
6437
6438 se = tg->se[i];
6439 /* Propagate contribution to hierarchy */
6440 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02006441
6442 /* Possible calls to update_curr() need rq clock */
6443 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08006444 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02006445 update_cfs_shares(group_cfs_rq(se));
6446 raw_spin_unlock_irqrestore(&rq->lock, flags);
6447 }
6448
6449done:
6450 mutex_unlock(&shares_mutex);
6451 return 0;
6452}
6453#else /* CONFIG_FAIR_GROUP_SCHED */
6454
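/*
 * With group scheduling compiled out these hooks reduce to no-ops, and
 * group allocation trivially succeeds.
 */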
6455void free_fair_sched_group(struct task_group *tg) { }
6456
6457int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6458{
6459 return 1;
6460}
6461
6462void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6463
6464#endif /* CONFIG_FAIR_GROUP_SCHED */
6465
Peter Zijlstra810b3812008-02-29 15:21:01 -05006466
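/*
 * Round-robin interval reported for a CFS task (sched_rr_get_interval()):
 * the task's current slice on its cfs_rq, converted to jiffies.
 */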
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07006467static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00006468{
6469 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00006470 unsigned int rr_interval = 0;
6471
6472 /*
6473 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6474 * idle runqueue:
6475 */
Peter Williams0d721ce2009-09-21 01:31:53 +00006476 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08006477 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00006478
6479 return rr_interval;
6480}
6481
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006482/*
6483 * All the scheduling class methods:
6484 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006485const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02006486 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006487 .enqueue_task = enqueue_task_fair,
6488 .dequeue_task = dequeue_task_fair,
6489 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05006490 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006491
Ingo Molnar2e09bf52007-10-15 17:00:05 +02006492 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006493
6494 .pick_next_task = pick_next_task_fair,
6495 .put_prev_task = put_prev_task_fair,
6496
Peter Williams681f3e62007-10-24 18:23:51 +02006497#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08006498 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02006499 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08006500
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006501 .rq_online = rq_online_fair,
6502 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006503
6504 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02006505#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006506
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006507 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006508 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006509 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006510
6511 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006512 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006513 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006514
Peter Williams0d721ce2009-09-21 01:31:53 +00006515 .get_rr_interval = get_rr_interval_fair,
6516
Peter Zijlstra810b3812008-02-29 15:21:01 -05006517#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006518 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006519#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006520};
6521
6522#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02006523void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006524{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006525 struct cfs_rq *cfs_rq;
6526
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006527 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02006528 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02006529 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006530 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006531}
6532#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02006533
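/*
 * Boot-time setup for the fair class: register the softirq handler that
 * runs periodic load balancing and, with NO_HZ_COMMON, initialize the nohz
 * idle-balance state and its CPU hotplug notifier.
 */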
6534__init void init_sched_fair_class(void)
6535{
6536#ifdef CONFIG_SMP
6537 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6538
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006539#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08006540 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006541 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08006542 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02006543#endif
6544#endif /* SMP */
6545
6546}