/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 4000000ULL;

/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity.
 */
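/* With the defaults above: 20 msec / 4 msec = 5, matching the initializer below. */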
static unsigned int sched_nr_latency = 5;

/*
 * After fork, child runs first. (default) If set to 0 then
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 10000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

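/*
 * The rbtree is keyed on each entity's vruntime relative to the
 * runqueue's min_vruntime; using the signed difference keeps the key
 * comparisons in __enqueue_entity() well-defined even if the absolute
 * u64 vruntime values wrap around.
 */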
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *se = NULL;
	struct rb_node *parent;

	while (*link) {
		parent = *link;
		se = rb_entry(parent, struct sched_entity, run_node);
		link = &parent->rb_right;
	}

	return se;
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
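/*
 * Example, using the default values above (before the boot-time
 * ilog(ncpus) scaling of the latency/granularity sysctls): with up to
 * 5 running tasks the period stays at 20ms; with 10 running tasks it
 * is stretched to 20ms * 10/5 = 40ms, so each nice-0 task still gets
 * at least the 4ms minimum granularity.
 */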
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period *= nr_running;
		do_div(period, nr_latency);
	}

	return period;
}

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*w/rw
 */
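/*
 * Example: two runnable nice-0 tasks within a 20ms period each have
 * w/rw = 1/2 and thus get a 10ms wall-time slice; a niced (lighter)
 * task on the same runqueue gets a proportionally smaller slice.
 */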
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running);

	slice *= se->load.weight;
	do_div(slice, cfs_rq->load.weight);

	return slice;
}

/*
 * We calculate the vruntime slice.
 *
 * vs = s/w = p/rw
 */
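/*
 * That is, the period expressed in nice-0 (NICE_0_LOAD) units: for two
 * runnable nice-0 tasks, vs = 20ms/2 = 10ms, matching the wall-time
 * slice above; a heavier runqueue yields a smaller vruntime advance
 * per slice.
 */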
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
	u64 vslice = __sched_period(nr_running);

	vslice *= NICE_0_LOAD;
	do_div(vslice, rq_weight);

	return vslice;
}

static u64 sched_vslice(struct cfs_rq *cfs_rq)
{
	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
}

static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
			cfs_rq->nr_running + 1);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;
	u64 vruntime;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;

	/*
	 * Maintain cfs_rq->min_vruntime to be a monotonically increasing
	 * value tracking the leftmost vruntime in the tree.
	 */
	if (first_fair(cfs_rq)) {
		vruntime = min_vruntime(curr->vruntime,
				__pick_next_entity(cfs_rq)->vruntime);
	} else
		vruntime = curr->vruntime;

	cfs_rq->min_vruntime =
		max_vruntime(cfs_rq->min_vruntime, vruntime);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		cpuacct_charge(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {
			struct task_struct *tsk = task_of(se);

			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime;

	vruntime = cfs_rq->min_vruntime;

	if (sched_feat(TREE_AVG)) {
		struct sched_entity *last = __pick_last_entity(cfs_rq);
		if (last) {
			vruntime += last->vruntime;
			vruntime >>= 1;
		}
	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
		vruntime += sched_vslice(cfs_rq)/2;

	/*
	 * The 'current' period is already promised to the current tasks;
	 * however, the extra weight of the new task will slow them down a
	 * little. Place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice_add(cfs_rq, se);

	if (!initial) {
		/* sleeps up to a single latency don't count. */
		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
			vruntime -= sysctl_sched_latency;

		/* ensure we never gain time by being placed backwards. */
		vruntime = max_vruntime(se->vruntime, vruntime);
	}

	se->vruntime = vruntime;
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = NULL;

	if (first_fair(cfs_rq)) {
		se = __pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
	}

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

#define GROUP_IMBALANCE_PCT	20

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se,
			    *topse = NULL;	/* Highest schedulable entity */
	int incload = 1;

	for_each_sched_entity(se) {
		topse = se;
		if (se->on_rq) {
			incload = 0;
			break;
		}
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}
	/* Increment cpu load if we just enqueued the first task of a group on
	 * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
	 * at the highest grouping level.
	 */
	if (incload)
		inc_cpu_load(rq, topse->load.weight);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se,
			    *topse = NULL;	/* Highest schedulable entity */
	int decload = 1;

	for_each_sched_entity(se) {
		topse = se;
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			if (parent_entity(se))
				decload = 0;
			break;
		}
		sleep = 1;
	}
	/* Decrement cpu load if we just dequeued the last task of a group on
	 * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
	 * at the highest grouping level.
	 */
	if (decload)
		dec_cpu_load(rq, topse->load.weight);
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		__update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(rightmost->vruntime < se->vruntime))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *se = &curr->se, *pse = &p->se;
	unsigned long gran;

	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}
	/*
	 * Batch tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy == SCHED_BATCH))
		return;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	while (!is_same_group(se, pse)) {
		se = parent_entity(se);
		pse = parent_entity(pse);
	}

	gran = sysctl_sched_wakeup_granularity;
	if (unlikely(se->load.weight != NICE_0_LOAD))
		gran = calc_delta_fair(gran, &se->load);

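	/*
	 * Preempt only if the woken task's vruntime is more than one
	 * (weight-scaled) wakeup granularity ahead of - i.e. smaller
	 * than - the current task's vruntime.
	 */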
	if (pse->vruntime + gran < se->vruntime)
		resched_task(curr);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;
	unsigned long load_moved;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu];
		unsigned long maxload, task_load, group_weight;
		unsigned long thisload, per_task_load;
		struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu];

		task_load = busy_cfs_rq->load.weight;
		group_weight = se->load.weight;

		/*
		 * 'group_weight' is contributed by tasks of total weight
		 * 'task_load'. To move 'rem_load_move' worth of weight only,
		 * we need to move a maximum task load of:
		 *
		 * maxload = (remload / group_weight) * task_load;
		 */
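		/*
		 * Illustrative numbers: group_weight = 2048, task_load = 4096
		 * and rem_load_move = 1024 give maxload = 1024 * 4096 / 2048
		 * = 2048 of task weight to move.
		 */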
		maxload = (rem_load_move * task_load) / group_weight;

		if (!maxload || !task_load)
			continue;

		per_task_load = task_load / busy_cfs_rq->nr_running;
		/*
		 * balance_tasks will try to forcibly move at least one task if
		 * possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if
		 * maxload is less than GROUP_IMBALANCE_PCT% of the per_task_load.
		 */
		if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load)
			continue;

		/* Disable priority-based load balance */
		*this_best_prio = 0;
		thisload = this_cfs_rq->load.weight;
#else
# define maxload rem_load_move
#endif
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		load_moved = balance_tasks(this_rq, this_cpu, busiest,
					   maxload, sd, idle, all_pinned,
					   this_best_prio,
					   &cfs_rq_iterator);

#ifdef CONFIG_FAIR_GROUP_SCHED
		/*
		 * load_moved holds the task load that was moved. The
		 * effective (group) weight moved would be:
		 *	load_moved_eff = load_moved/task_load * group_weight;
		 */
		load_moved = (group_weight * load_moved) / task_load;

		/* Adjust shares on both cpus to reflect load_moved */
		group_weight -= load_moved;
		set_se_shares(se, group_weight);

		se = busy_cfs_rq->tg->se[this_cpu];
		if (!thisload)
			group_weight = load_moved;
		else
			group_weight = se->load.weight + load_moved;
		set_se_shares(se, group_weight);
#endif

		rem_load_move -= load_moved;

		if (rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
#endif

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se);
	}
}

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();

	sched_info_queued(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, se, 1);

	/* 'curr' will be NULL if the child belongs to a different group */
	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
			curr && curr->vruntime < se->vruntime) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
	}

	enqueue_task_fair(rq, p, 0);
	resched_task(rq->curr);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
#endif
	lock_task_group_list();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	unlock_task_group_list();
}
#endif