blob: ab9a034c4a17411f96c0f7d94a172408cc51b80b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
Ingo Molnarc31f2e82007-07-09 18:52:01 +020019 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
Ingo Molnarb9131762008-01-25 21:08:19 +010025 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
Linus Torvalds1da177e2005-04-16 15:20:36 -070027 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020033#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/highmem.h>
35#include <linux/smp_lock.h>
36#include <asm/mmu_context.h>
37#include <linux/interrupt.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080038#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/completion.h>
40#include <linux/kernel_stat.h>
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070041#include <linux/debug_locks.h>
Ingo Molnarcdd6c482009-09-21 12:02:48 +020042#include <linux/perf_event.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/security.h>
44#include <linux/notifier.h>
45#include <linux/profile.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080046#include <linux/freezer.h>
akpm@osdl.org198e2f12006-01-12 01:05:30 -080047#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <linux/blkdev.h>
49#include <linux/delay.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070050#include <linux/pid_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <linux/smp.h>
52#include <linux/threads.h>
53#include <linux/timer.h>
54#include <linux/rcupdate.h>
55#include <linux/cpu.h>
56#include <linux/cpuset.h>
57#include <linux/percpu.h>
58#include <linux/kthread.h>
Alexey Dobriyanb5aadf72008-10-06 13:23:43 +040059#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#include <linux/seq_file.h>
Nick Piggine692ab52007-07-26 13:40:43 +020061#include <linux/sysctl.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062#include <linux/syscalls.h>
63#include <linux/times.h>
Jay Lan8f0ab512006-09-30 23:28:59 -070064#include <linux/tsacct_kern.h>
bibo maoc6fd91f2006-03-26 01:38:20 -080065#include <linux/kprobes.h>
Shailabh Nagar0ff92242006-07-14 00:24:37 -070066#include <linux/delayacct.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020067#include <linux/unistd.h>
Jens Axboef5ff8422007-09-21 09:19:54 +020068#include <linux/pagemap.h>
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +010069#include <linux/hrtimer.h>
Reynes Philippe30914a52008-03-17 16:19:05 -070070#include <linux/tick.h>
Peter Zijlstraf00b45c2008-04-19 19:45:00 +020071#include <linux/debugfs.h>
72#include <linux/ctype.h>
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +020073#include <linux/ftrace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
Eric Dumazet5517d862007-05-08 00:32:57 -070075#include <asm/tlb.h>
Satyam Sharma838225b2007-10-24 18:23:50 +020076#include <asm/irq_regs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
Gregory Haskins6e0534f2008-05-12 21:21:01 +020078#include "sched_cpupri.h"
79
Steven Rostedta8d154b2009-04-10 09:36:00 -040080#define CREATE_TRACE_POINTS
Steven Rostedtad8d75f2009-04-14 19:39:12 -040081#include <trace/events/sched.h>
Steven Rostedta8d154b2009-04-10 09:36:00 -040082
Linus Torvalds1da177e2005-04-16 15:20:36 -070083/*
84 * Convert user-nice values [ -20 ... 0 ... 19 ]
85 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
86 * and back.
87 */
88#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
89#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
90#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
91
92/*
93 * 'User priority' is the nice value converted to something we
94 * can work with better when scaling various scheduler parameters,
95 * it's a [ 0 ... 39 ] range.
96 */
97#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
98#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
99#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
100
101/*
Ingo Molnard7876a02008-01-25 21:08:19 +0100102 * Helpers for converting nanosecond timing to jiffy resolution
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103 */
Eric Dumazetd6322fa2007-11-09 22:39:38 +0100104#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200106#define NICE_0_LOAD SCHED_LOAD_SCALE
107#define NICE_0_SHIFT SCHED_LOAD_SHIFT
108
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109/*
110 * These are the 'tuning knobs' of the scheduler:
111 *
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +0200112 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113 * Timeslices get refilled after they expire.
114 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#define DEF_TIMESLICE (100 * HZ / 1000)
Peter Williams2dd73a42006-06-27 02:54:34 -0700116
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200117/*
118 * single value that denotes runtime == period, ie unlimited time.
119 */
120#define RUNTIME_INF ((u64)~0ULL)
121
Ingo Molnare05606d2007-07-09 18:51:59 +0200122static inline int rt_policy(int policy)
123{
Roel Kluin3f33a7c2008-05-13 23:44:11 +0200124 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
Ingo Molnare05606d2007-07-09 18:51:59 +0200125 return 1;
126 return 0;
127}
128
129static inline int task_has_rt_policy(struct task_struct *p)
130{
131 return rt_policy(p->policy);
132}
133
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134/*
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200135 * This is the priority-queue data structure of the RT scheduling class:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136 */
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200137struct rt_prio_array {
138 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
139 struct list_head queue[MAX_RT_PRIO];
140};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200142struct rt_bandwidth {
Ingo Molnarea736ed2008-03-25 13:51:45 +0100143 /* nests inside the rq lock: */
144 spinlock_t rt_runtime_lock;
145 ktime_t rt_period;
146 u64 rt_runtime;
147 struct hrtimer rt_period_timer;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200148};
149
150static struct rt_bandwidth def_rt_bandwidth;
151
152static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
153
154static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
155{
156 struct rt_bandwidth *rt_b =
157 container_of(timer, struct rt_bandwidth, rt_period_timer);
158 ktime_t now;
159 int overrun;
160 int idle = 0;
161
162 for (;;) {
163 now = hrtimer_cb_get_time(timer);
164 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
165
166 if (!overrun)
167 break;
168
169 idle = do_sched_rt_period_timer(rt_b, overrun);
170 }
171
172 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
173}
174
175static
176void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
177{
178 rt_b->rt_period = ns_to_ktime(period);
179 rt_b->rt_runtime = runtime;
180
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200181 spin_lock_init(&rt_b->rt_runtime_lock);
182
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200183 hrtimer_init(&rt_b->rt_period_timer,
184 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
185 rt_b->rt_period_timer.function = sched_rt_period_timer;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200186}
187
Krzysztof Heltc8bfff62008-09-05 23:46:19 +0200188static inline int rt_bandwidth_enabled(void)
189{
190 return sysctl_sched_rt_runtime >= 0;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200191}
192
193static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
194{
195 ktime_t now;
196
Hiroshi Shimamotocac64d02009-02-25 09:59:26 -0800197 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200198 return;
199
200 if (hrtimer_active(&rt_b->rt_period_timer))
201 return;
202
203 spin_lock(&rt_b->rt_runtime_lock);
204 for (;;) {
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +0100205 unsigned long delta;
206 ktime_t soft, hard;
207
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200208 if (hrtimer_active(&rt_b->rt_period_timer))
209 break;
210
211 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
212 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +0100213
214 soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
215 hard = hrtimer_get_expires(&rt_b->rt_period_timer);
216 delta = ktime_to_ns(ktime_sub(hard, soft));
217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
Arun R Bharadwaj5c333862009-04-16 12:14:37 +0530218 HRTIMER_MODE_ABS_PINNED, 0);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200219 }
220 spin_unlock(&rt_b->rt_runtime_lock);
221}
222
223#ifdef CONFIG_RT_GROUP_SCHED
224static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
225{
226 hrtimer_cancel(&rt_b->rt_period_timer);
227}
228#endif
229
Heiko Carstens712555e2008-04-28 11:33:07 +0200230/*
231 * sched_domains_mutex serializes calls to arch_init_sched_domains,
232 * detach_destroy_domains and partition_sched_domains.
233 */
234static DEFINE_MUTEX(sched_domains_mutex);
235
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100236#ifdef CONFIG_GROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200237
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700238#include <linux/cgroup.h>
239
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200240struct cfs_rq;
241
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100242static LIST_HEAD(task_groups);
243
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200244/* task group related information */
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200245struct task_group {
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100246#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700247 struct cgroup_subsys_state css;
248#endif
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100249
Arun R Bharadwaj6c415b92008-12-01 20:49:05 +0530250#ifdef CONFIG_USER_SCHED
251 uid_t uid;
252#endif
253
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100254#ifdef CONFIG_FAIR_GROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200255 /* schedulable entities of this group on each cpu */
256 struct sched_entity **se;
257 /* runqueue "owned" by this group on each cpu */
258 struct cfs_rq **cfs_rq;
259 unsigned long shares;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100260#endif
261
262#ifdef CONFIG_RT_GROUP_SCHED
263 struct sched_rt_entity **rt_se;
264 struct rt_rq **rt_rq;
265
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200266 struct rt_bandwidth rt_bandwidth;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100267#endif
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100268
Srivatsa Vaddagiriae8393e2007-10-29 21:18:11 +0100269 struct rcu_head rcu;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100270 struct list_head list;
Peter Zijlstraf473aa52008-04-19 19:45:00 +0200271
272 struct task_group *parent;
273 struct list_head siblings;
274 struct list_head children;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200275};
276
Dhaval Giani354d60c2008-04-19 19:44:59 +0200277#ifdef CONFIG_USER_SCHED
Peter Zijlstraeff766a2008-04-19 19:45:00 +0200278
Arun R Bharadwaj6c415b92008-12-01 20:49:05 +0530279/* Helper function to pass uid information to create_sched_user() */
280void set_tg_uid(struct user_struct *user)
281{
282 user->tg->uid = user->uid;
283}
284
Peter Zijlstraeff766a2008-04-19 19:45:00 +0200285/*
286 * Root task group.
Anirban Sinha84e9dab2009-08-28 22:40:43 -0700287 * Every UID task group (including init_task_group aka UID-0) will
288 * be a child to this group.
Peter Zijlstraeff766a2008-04-19 19:45:00 +0200289 */
290struct task_group root_task_group;
291
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100292#ifdef CONFIG_FAIR_GROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200293/* Default task group's sched entity on each cpu */
294static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
295/* Default task group's cfs_rq on each cpu */
Linus Torvaldsada3fa12009-09-15 09:39:44 -0700296static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +0200297#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100298
299#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100300static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
Tejun Heob9bf3122009-06-24 15:13:47 +0900301static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +0200302#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +0200303#else /* !CONFIG_USER_SCHED */
Peter Zijlstraeff766a2008-04-19 19:45:00 +0200304#define root_task_group init_task_group
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +0200305#endif /* CONFIG_USER_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100306
Peter Zijlstra8ed36992008-02-13 15:45:39 +0100307/* task_group_lock serializes add/remove of task groups and also changes to
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +0100308 * a task group's cpu shares.
309 */
Peter Zijlstra8ed36992008-02-13 15:45:39 +0100310static DEFINE_SPINLOCK(task_group_lock);
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +0100311
Peter Zijlstra57310a92009-03-09 13:56:21 +0100312#ifdef CONFIG_SMP
313static int root_task_group_empty(void)
314{
315 return list_empty(&root_task_group.children);
316}
317#endif
318
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100319#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100320#ifdef CONFIG_USER_SCHED
Ingo Molnar0eab9142008-01-25 21:08:19 +0100321# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +0200322#else /* !CONFIG_USER_SCHED */
Srivatsa Vaddagiri93f992c2008-01-25 21:07:59 +0100323# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +0200324#endif /* CONFIG_USER_SCHED */
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200325
Miao Xiecb4ad1f2008-04-28 12:54:56 +0800326/*
Lai Jiangshan2e084782008-06-12 16:42:58 +0800327 * A weight of 0 or 1 can cause arithmetics problems.
328 * A weight of a cfs_rq is the sum of weights of which entities
329 * are queued on this cfs_rq, so a weight of a entity should not be
330 * too large, so as the shares value of a task group.
Miao Xiecb4ad1f2008-04-28 12:54:56 +0800331 * (The default weight is 1024 - so there's no practical
332 * limitation from this.)
333 */
Peter Zijlstra18d95a22008-04-19 19:45:00 +0200334#define MIN_SHARES 2
Lai Jiangshan2e084782008-06-12 16:42:58 +0800335#define MAX_SHARES (1UL << 18)
Peter Zijlstra18d95a22008-04-19 19:45:00 +0200336
Srivatsa Vaddagiri93f992c2008-01-25 21:07:59 +0100337static int init_task_group_load = INIT_TASK_GROUP_LOAD;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100338#endif
339
340/* Default task group.
341 * Every task in system belong to this group at bootup.
342 */
Mike Travis434d53b2008-04-04 18:11:04 -0700343struct task_group init_task_group;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200344
345/* return group to which a task belongs */
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200346static inline struct task_group *task_group(struct task_struct *p)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200347{
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200348 struct task_group *tg;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +0200349
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100350#ifdef CONFIG_USER_SCHED
David Howellsc69e8d92008-11-14 10:39:19 +1100351 rcu_read_lock();
352 tg = __task_cred(p)->user->tg;
353 rcu_read_unlock();
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100354#elif defined(CONFIG_CGROUP_SCHED)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700355 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
356 struct task_group, css);
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200357#else
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100358 tg = &init_task_group;
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200359#endif
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +0200360 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200361}
362
363/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100364static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200365{
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100366#ifdef CONFIG_FAIR_GROUP_SCHED
Dmitry Adamushkoce96b5a2007-11-15 20:57:40 +0100367 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
368 p->se.parent = task_group(p)->se[cpu];
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100369#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100370
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100371#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100372 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
373 p->rt.parent = task_group(p)->rt_se[cpu];
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100374#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200375}
376
377#else
378
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100379static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
Peter Zijlstra83378262008-06-27 13:41:37 +0200380static inline struct task_group *task_group(struct task_struct *p)
381{
382 return NULL;
383}
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200384
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100385#endif /* CONFIG_GROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200386
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200387/* CFS-related fields in a runqueue */
388struct cfs_rq {
389 struct load_weight load;
390 unsigned long nr_running;
391
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200392 u64 exec_clock;
Ingo Molnare9acbff2007-10-15 17:00:04 +0200393 u64 min_vruntime;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200394
395 struct rb_root tasks_timeline;
396 struct rb_node *rb_leftmost;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +0200397
398 struct list_head tasks;
399 struct list_head *balance_iterator;
400
401 /*
402 * 'curr' points to currently running entity on this cfs_rq.
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200403 * It is set to NULL otherwise (i.e when none are currently running).
404 */
Peter Zijlstra47932412008-11-04 21:25:09 +0100405 struct sched_entity *curr, *next, *last;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200406
Peter Zijlstra5ac5c4d2008-11-10 10:46:32 +0100407 unsigned int nr_spread_over;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200408
Ingo Molnar62160e32007-10-15 17:00:03 +0200409#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200410 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
411
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100412 /*
413 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200414 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
415 * (like users, containers etc.)
416 *
417 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
418 * list is used during load balance.
419 */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100420 struct list_head leaf_cfs_rq_list;
421 struct task_group *tg; /* group that "owns" this runqueue */
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200422
423#ifdef CONFIG_SMP
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200424 /*
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200425 * the part of load.weight contributed by tasks
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200426 */
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200427 unsigned long task_weight;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200428
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200429 /*
430 * h_load = weight * f(tg)
431 *
432 * Where f(tg) is the recursive weight fraction assigned to
433 * this group.
434 */
435 unsigned long h_load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200436
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200437 /*
438 * this cpu's part of tg->shares
439 */
440 unsigned long shares;
Peter Zijlstraf1d239f2008-06-27 13:41:38 +0200441
442 /*
443 * load.weight at the time we set shares
444 */
445 unsigned long rq_weight;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200446#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200447#endif
448};
449
450/* Real-Time classes' related field in a runqueue: */
451struct rt_rq {
452 struct rt_prio_array active;
Steven Rostedt63489e42008-01-25 21:08:03 +0100453 unsigned long rt_nr_running;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100454#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -0500455 struct {
456 int curr; /* highest queued rt task prio */
Gregory Haskins398a1532009-01-14 09:10:04 -0500457#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -0500458 int next; /* next highest */
Gregory Haskins398a1532009-01-14 09:10:04 -0500459#endif
Gregory Haskinse864c492008-12-29 09:39:49 -0500460 } highest_prio;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100461#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100462#ifdef CONFIG_SMP
Gregory Haskins73fe6aa2008-01-25 21:08:07 +0100463 unsigned long rt_nr_migratory;
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +0200464 unsigned long rt_nr_total;
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +0100465 int overloaded;
Gregory Haskins917b6272008-12-29 09:39:53 -0500466 struct plist_head pushable_tasks;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100467#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100468 int rt_throttled;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100469 u64 rt_time;
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200470 u64 rt_runtime;
Ingo Molnarea736ed2008-03-25 13:51:45 +0100471 /* Nests inside the rq lock: */
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200472 spinlock_t rt_runtime_lock;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100473
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100474#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100475 unsigned long rt_nr_boosted;
476
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100477 struct rq *rq;
478 struct list_head leaf_rt_rq_list;
479 struct task_group *tg;
480 struct sched_rt_entity *rt_se;
481#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200482};
483
Gregory Haskins57d885f2008-01-25 21:08:18 +0100484#ifdef CONFIG_SMP
485
486/*
487 * We add the notion of a root-domain which will be used to define per-domain
Ingo Molnar0eab9142008-01-25 21:08:19 +0100488 * variables. Each exclusive cpuset essentially defines an island domain by
489 * fully partitioning the member cpus from any other cpuset. Whenever a new
Gregory Haskins57d885f2008-01-25 21:08:18 +0100490 * exclusive cpuset is created, we also create and attach a new root-domain
491 * object.
492 *
Gregory Haskins57d885f2008-01-25 21:08:18 +0100493 */
494struct root_domain {
495 atomic_t refcount;
Rusty Russellc6c49272008-11-25 02:35:05 +1030496 cpumask_var_t span;
497 cpumask_var_t online;
Gregory Haskins637f5082008-01-25 21:08:18 +0100498
Ingo Molnar0eab9142008-01-25 21:08:19 +0100499 /*
Gregory Haskins637f5082008-01-25 21:08:18 +0100500 * The "RT overload" flag: it gets set if a CPU has more than
501 * one runnable RT task.
502 */
Rusty Russellc6c49272008-11-25 02:35:05 +1030503 cpumask_var_t rto_mask;
Ingo Molnar0eab9142008-01-25 21:08:19 +0100504 atomic_t rto_count;
Gregory Haskins6e0534f2008-05-12 21:21:01 +0200505#ifdef CONFIG_SMP
506 struct cpupri cpupri;
507#endif
Gregory Haskins57d885f2008-01-25 21:08:18 +0100508};
509
Gregory Haskinsdc938522008-01-25 21:08:26 +0100510/*
511 * By default the system creates a single root-domain with all cpus as
512 * members (mimicking the global state we have today).
513 */
Gregory Haskins57d885f2008-01-25 21:08:18 +0100514static struct root_domain def_root_domain;
515
516#endif
517
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200518/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519 * This is the main, per-CPU runqueue data structure.
520 *
521 * Locking rule: those places that want to lock multiple runqueues
522 * (such as the load balancing or the thread migration code), lock
523 * acquire operations must be ordered by ascending &runqueue.
524 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700525struct rq {
Ingo Molnard8016492007-10-18 21:32:55 +0200526 /* runqueue lock: */
527 spinlock_t lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528
529 /*
530 * nr_running and cpu_load should be in the same cacheline because
531 * remote CPUs use both these fields when doing load calculation.
532 */
533 unsigned long nr_running;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200534 #define CPU_LOAD_IDX_MAX 5
535 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -0700536#ifdef CONFIG_NO_HZ
537 unsigned char in_nohz_recently;
538#endif
Ingo Molnard8016492007-10-18 21:32:55 +0200539 /* capture load from *all* tasks on this cpu: */
540 struct load_weight load;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200541 unsigned long nr_load_updates;
542 u64 nr_switches;
543
544 struct cfs_rq cfs;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100545 struct rt_rq rt;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100546
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200547#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnard8016492007-10-18 21:32:55 +0200548 /* list of leaf cfs_rq on this cpu: */
549 struct list_head leaf_cfs_rq_list;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100550#endif
551#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100552 struct list_head leaf_rt_rq_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554
555 /*
556 * This is part of a global counter where only the total sum
557 * over all CPUs matters. A task can increase this counter on
558 * one CPU and if it got migrated afterwards it may decrease
559 * it on another CPU. Always updated under the runqueue lock:
560 */
561 unsigned long nr_uninterruptible;
562
Ingo Molnar36c8b582006-07-03 00:25:41 -0700563 struct task_struct *curr, *idle;
Christoph Lameterc9819f42006-12-10 02:20:25 -0800564 unsigned long next_balance;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 struct mm_struct *prev_mm;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200566
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200567 u64 clock;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200568
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569 atomic_t nr_iowait;
570
571#ifdef CONFIG_SMP
Ingo Molnar0eab9142008-01-25 21:08:19 +0100572 struct root_domain *rd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 struct sched_domain *sd;
574
Henrik Austada0a522c2009-02-13 20:35:45 +0100575 unsigned char idle_at_tick;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 /* For active balancing */
Gregory Haskins3f029d32009-07-29 11:08:47 -0400577 int post_schedule;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578 int active_balance;
579 int push_cpu;
Ingo Molnard8016492007-10-18 21:32:55 +0200580 /* cpu of this runqueue: */
581 int cpu;
Gregory Haskins1f11eb62008-06-04 15:04:05 -0400582 int online;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583
Peter Zijlstraa8a51d52008-06-27 13:41:26 +0200584 unsigned long avg_load_per_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585
Ingo Molnar36c8b582006-07-03 00:25:41 -0700586 struct task_struct *migration_thread;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 struct list_head migration_queue;
Peter Zijlstrae9e92502009-09-01 10:34:37 +0200588
589 u64 rt_avg;
590 u64 age_stamp;
Mike Galbraith1b9508f2009-11-04 17:53:50 +0100591 u64 idle_stamp;
592 u64 avg_idle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593#endif
594
Thomas Gleixnerdce48a82009-04-11 10:43:41 +0200595 /* calc_load related fields */
596 unsigned long calc_load_update;
597 long calc_load_active;
598
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100599#ifdef CONFIG_SCHED_HRTICK
Peter Zijlstra31656512008-07-18 18:01:23 +0200600#ifdef CONFIG_SMP
601 int hrtick_csd_pending;
602 struct call_single_data hrtick_csd;
603#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100604 struct hrtimer hrtick_timer;
605#endif
606
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607#ifdef CONFIG_SCHEDSTATS
608 /* latency stats */
609 struct sched_info rq_sched_info;
Ken Chen9c2c4802008-12-16 23:41:22 -0800610 unsigned long long rq_cpu_time;
611 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612
613 /* sys_sched_yield() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200614 unsigned int yld_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615
616 /* schedule() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200617 unsigned int sched_switch;
618 unsigned int sched_count;
619 unsigned int sched_goidle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620
621 /* try_to_wake_up() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200622 unsigned int ttwu_count;
623 unsigned int ttwu_local;
Ingo Molnarb8efb562007-10-15 17:00:10 +0200624
625 /* BKL stats */
Ken Chen480b9432007-10-18 21:32:56 +0200626 unsigned int bkl_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627#endif
628};
629
Fenghua Yuf34e3b62007-07-19 01:48:13 -0700630static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631
Peter Zijlstra7d478722009-09-14 19:55:44 +0200632static inline
633void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnardd41f592007-07-09 18:51:59 +0200634{
Peter Zijlstra7d478722009-09-14 19:55:44 +0200635 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +0200636}
637
Christoph Lameter0a2966b2006-09-25 23:30:51 -0700638static inline int cpu_of(struct rq *rq)
639{
640#ifdef CONFIG_SMP
641 return rq->cpu;
642#else
643 return 0;
644#endif
645}
646
Ingo Molnar20d315d2007-07-09 18:51:58 +0200647/*
Nick Piggin674311d2005-06-25 14:57:27 -0700648 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -0700649 * See detach_destroy_domains: synchronize_sched for details.
Nick Piggin674311d2005-06-25 14:57:27 -0700650 *
651 * The domain tree of any CPU may only be accessed from within
652 * preempt-disabled sections.
653 */
Ingo Molnar48f24c42006-07-03 00:25:40 -0700654#define for_each_domain(cpu, __sd) \
655 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656
657#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
658#define this_rq() (&__get_cpu_var(runqueues))
659#define task_rq(p) cpu_rq(task_cpu(p))
660#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
Hitoshi Mitake54d35f22009-06-29 14:44:57 +0900661#define raw_rq() (&__raw_get_cpu_var(runqueues))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662
Ingo Molnaraa9c4c02008-12-17 14:10:57 +0100663inline void update_rq_clock(struct rq *rq)
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200664{
665 rq->clock = sched_clock_cpu(cpu_of(rq));
666}
667
Ingo Molnare436d802007-07-19 21:28:35 +0200668/*
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200669 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
670 */
671#ifdef CONFIG_SCHED_DEBUG
672# define const_debug __read_mostly
673#else
674# define const_debug static const
675#endif
676
Ingo Molnar017730c2008-05-12 21:20:52 +0200677/**
678 * runqueue_is_locked
Randy Dunlape17b38b2009-10-11 19:12:00 -0700679 * @cpu: the processor in question.
Ingo Molnar017730c2008-05-12 21:20:52 +0200680 *
681 * Returns true if the current cpu runqueue is locked.
682 * This interface allows printk to be called with the runqueue lock
683 * held and know whether or not it is OK to wake up the klogd.
684 */
Andrew Morton89f19f02009-09-19 11:55:44 -0700685int runqueue_is_locked(int cpu)
Ingo Molnar017730c2008-05-12 21:20:52 +0200686{
Andrew Morton89f19f02009-09-19 11:55:44 -0700687 return spin_is_locked(&cpu_rq(cpu)->lock);
Ingo Molnar017730c2008-05-12 21:20:52 +0200688}
689
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200690/*
691 * Debugging: various feature bits
692 */
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200693
694#define SCHED_FEAT(name, enabled) \
695 __SCHED_FEAT_##name ,
696
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200697enum {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200698#include "sched_features.h"
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200699};
700
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200701#undef SCHED_FEAT
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200702
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200703#define SCHED_FEAT(name, enabled) \
704 (1UL << __SCHED_FEAT_##name) * enabled |
705
706const_debug unsigned int sysctl_sched_features =
707#include "sched_features.h"
708 0;
709
710#undef SCHED_FEAT
711
712#ifdef CONFIG_SCHED_DEBUG
713#define SCHED_FEAT(name, enabled) \
714 #name ,
715
Harvey Harrison983ed7a2008-04-24 18:17:55 -0700716static __read_mostly char *sched_feat_names[] = {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200717#include "sched_features.h"
718 NULL
719};
720
721#undef SCHED_FEAT
722
Li Zefan34f3a812008-10-30 15:23:32 +0800723static int sched_feat_show(struct seq_file *m, void *v)
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200724{
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200725 int i;
726
727 for (i = 0; sched_feat_names[i]; i++) {
Li Zefan34f3a812008-10-30 15:23:32 +0800728 if (!(sysctl_sched_features & (1UL << i)))
729 seq_puts(m, "NO_");
730 seq_printf(m, "%s ", sched_feat_names[i]);
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200731 }
Li Zefan34f3a812008-10-30 15:23:32 +0800732 seq_puts(m, "\n");
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200733
Li Zefan34f3a812008-10-30 15:23:32 +0800734 return 0;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200735}
736
737static ssize_t
738sched_feat_write(struct file *filp, const char __user *ubuf,
739 size_t cnt, loff_t *ppos)
740{
741 char buf[64];
742 char *cmp = buf;
743 int neg = 0;
744 int i;
745
746 if (cnt > 63)
747 cnt = 63;
748
749 if (copy_from_user(&buf, ubuf, cnt))
750 return -EFAULT;
751
752 buf[cnt] = 0;
753
Ingo Molnarc24b7c52008-04-18 10:55:34 +0200754 if (strncmp(buf, "NO_", 3) == 0) {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200755 neg = 1;
756 cmp += 3;
757 }
758
759 for (i = 0; sched_feat_names[i]; i++) {
760 int len = strlen(sched_feat_names[i]);
761
762 if (strncmp(cmp, sched_feat_names[i], len) == 0) {
763 if (neg)
764 sysctl_sched_features &= ~(1UL << i);
765 else
766 sysctl_sched_features |= (1UL << i);
767 break;
768 }
769 }
770
771 if (!sched_feat_names[i])
772 return -EINVAL;
773
774 filp->f_pos += cnt;
775
776 return cnt;
777}
778
Li Zefan34f3a812008-10-30 15:23:32 +0800779static int sched_feat_open(struct inode *inode, struct file *filp)
780{
781 return single_open(filp, sched_feat_show, NULL);
782}
783
Alexey Dobriyan828c0952009-10-01 15:43:56 -0700784static const struct file_operations sched_feat_fops = {
Li Zefan34f3a812008-10-30 15:23:32 +0800785 .open = sched_feat_open,
786 .write = sched_feat_write,
787 .read = seq_read,
788 .llseek = seq_lseek,
789 .release = single_release,
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200790};
791
792static __init int sched_init_debug(void)
793{
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200794 debugfs_create_file("sched_features", 0644, NULL, NULL,
795 &sched_feat_fops);
796
797 return 0;
798}
799late_initcall(sched_init_debug);
800
801#endif
802
803#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200804
805/*
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +0100806 * Number of tasks to iterate in a single balance run.
807 * Limited because this is done with IRQs disabled.
808 */
809const_debug unsigned int sysctl_sched_nr_migrate = 32;
810
811/*
Peter Zijlstra2398f2c2008-06-27 13:41:35 +0200812 * ratelimit for updating the group shares.
Peter Zijlstra55cd5342008-08-04 08:54:26 +0200813 * default: 0.25ms
Peter Zijlstra2398f2c2008-06-27 13:41:35 +0200814 */
Peter Zijlstra55cd5342008-08-04 08:54:26 +0200815unsigned int sysctl_sched_shares_ratelimit = 250000;
Peter Zijlstra2398f2c2008-06-27 13:41:35 +0200816
817/*
Peter Zijlstraffda12a2008-10-17 19:27:02 +0200818 * Inject some fuzzyness into changing the per-cpu group shares
819 * this avoids remote rq-locks at the expense of fairness.
820 * default: 4
821 */
822unsigned int sysctl_sched_shares_thresh = 4;
823
824/*
Peter Zijlstrae9e92502009-09-01 10:34:37 +0200825 * period over which we average the RT time consumption, measured
826 * in ms.
827 *
828 * default: 1s
829 */
830const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
831
832/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100833 * period over which we measure -rt task cpu usage in us.
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100834 * default: 1s
835 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100836unsigned int sysctl_sched_rt_period = 1000000;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100837
Ingo Molnar6892b752008-02-13 14:02:36 +0100838static __read_mostly int scheduler_running;
839
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100840/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100841 * part of the period that we allow rt tasks to run in us.
842 * default: 0.95s
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100843 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100844int sysctl_sched_rt_runtime = 950000;
845
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200846static inline u64 global_rt_period(void)
847{
848 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
849}
850
851static inline u64 global_rt_runtime(void)
852{
roel kluine26873b2008-07-22 16:51:15 -0400853 if (sysctl_sched_rt_runtime < 0)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200854 return RUNTIME_INF;
855
856 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
857}
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100858
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859#ifndef prepare_arch_switch
Nick Piggin4866cde2005-06-25 14:57:23 -0700860# define prepare_arch_switch(next) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700862#ifndef finish_arch_switch
863# define finish_arch_switch(prev) do { } while (0)
864#endif
865
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100866static inline int task_current(struct rq *rq, struct task_struct *p)
867{
868 return rq->curr == p;
869}
870
Nick Piggin4866cde2005-06-25 14:57:23 -0700871#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar70b97a72006-07-03 00:25:42 -0700872static inline int task_running(struct rq *rq, struct task_struct *p)
Nick Piggin4866cde2005-06-25 14:57:23 -0700873{
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100874 return task_current(rq, p);
Nick Piggin4866cde2005-06-25 14:57:23 -0700875}
876
Ingo Molnar70b97a72006-07-03 00:25:42 -0700877static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700878{
879}
880
Ingo Molnar70b97a72006-07-03 00:25:42 -0700881static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700882{
Ingo Molnarda04c032005-09-13 11:17:59 +0200883#ifdef CONFIG_DEBUG_SPINLOCK
884 /* this is a valid case when another task releases the spinlock */
885 rq->lock.owner = current;
886#endif
Ingo Molnar8a25d5d2006-07-03 00:24:54 -0700887 /*
888 * If we are tracking spinlock dependencies then we have to
889 * fix up the runqueue lock - which gets 'carried over' from
890 * prev into current:
891 */
892 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
893
Nick Piggin4866cde2005-06-25 14:57:23 -0700894 spin_unlock_irq(&rq->lock);
895}
896
897#else /* __ARCH_WANT_UNLOCKED_CTXSW */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700898static inline int task_running(struct rq *rq, struct task_struct *p)
Nick Piggin4866cde2005-06-25 14:57:23 -0700899{
900#ifdef CONFIG_SMP
901 return p->oncpu;
902#else
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100903 return task_current(rq, p);
Nick Piggin4866cde2005-06-25 14:57:23 -0700904#endif
905}
906
Ingo Molnar70b97a72006-07-03 00:25:42 -0700907static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700908{
909#ifdef CONFIG_SMP
910 /*
911 * We can optimise this out completely for !SMP, because the
912 * SMP rebalancing from interrupt is the only thing that cares
913 * here.
914 */
915 next->oncpu = 1;
916#endif
917#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
918 spin_unlock_irq(&rq->lock);
919#else
920 spin_unlock(&rq->lock);
921#endif
922}
923
Ingo Molnar70b97a72006-07-03 00:25:42 -0700924static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700925{
926#ifdef CONFIG_SMP
927 /*
928 * After ->oncpu is cleared, the task can be moved to a different CPU.
929 * We must ensure this doesn't happen until the switch is completely
930 * finished.
931 */
932 smp_wmb();
933 prev->oncpu = 0;
934#endif
935#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
936 local_irq_enable();
937#endif
938}
939#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940
941/*
Ingo Molnarb29739f2006-06-27 02:54:51 -0700942 * __task_rq_lock - lock the runqueue a given task resides on.
943 * Must be called interrupts disabled.
944 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700945static inline struct rq *__task_rq_lock(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700946 __acquires(rq->lock)
947{
Andi Kleen3a5c3592007-10-15 17:00:14 +0200948 for (;;) {
949 struct rq *rq = task_rq(p);
950 spin_lock(&rq->lock);
951 if (likely(rq == task_rq(p)))
952 return rq;
Ingo Molnarb29739f2006-06-27 02:54:51 -0700953 spin_unlock(&rq->lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -0700954 }
Ingo Molnarb29739f2006-06-27 02:54:51 -0700955}
956
957/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958 * task_rq_lock - lock the runqueue a given task resides on and disable
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100959 * interrupts. Note the ordering: we can safely lookup the task_rq without
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 * explicitly disabling preemption.
961 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700962static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963 __acquires(rq->lock)
964{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700965 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966
Andi Kleen3a5c3592007-10-15 17:00:14 +0200967 for (;;) {
968 local_irq_save(*flags);
969 rq = task_rq(p);
970 spin_lock(&rq->lock);
971 if (likely(rq == task_rq(p)))
972 return rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973 spin_unlock_irqrestore(&rq->lock, *flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975}
976
Oleg Nesterovad474ca2008-11-10 15:39:30 +0100977void task_rq_unlock_wait(struct task_struct *p)
978{
979 struct rq *rq = task_rq(p);
980
981 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
982 spin_unlock_wait(&rq->lock);
983}
984
Alexey Dobriyana9957442007-10-15 17:00:13 +0200985static void __task_rq_unlock(struct rq *rq)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700986 __releases(rq->lock)
987{
988 spin_unlock(&rq->lock);
989}
990
Ingo Molnar70b97a72006-07-03 00:25:42 -0700991static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992 __releases(rq->lock)
993{
994 spin_unlock_irqrestore(&rq->lock, *flags);
995}
996
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997/*
Robert P. J. Daycc2a73b2006-12-10 02:20:00 -0800998 * this_rq_lock - lock this runqueue and disable interrupts.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02001000static struct rq *this_rq_lock(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001 __acquires(rq->lock)
1002{
Ingo Molnar70b97a72006-07-03 00:25:42 -07001003 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004
1005 local_irq_disable();
1006 rq = this_rq();
1007 spin_lock(&rq->lock);
1008
1009 return rq;
1010}
1011
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001012#ifdef CONFIG_SCHED_HRTICK
1013/*
1014 * Use HR-timers to deliver accurate preemption points.
1015 *
1016 * Its all a bit involved since we cannot program an hrt while holding the
1017 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
1018 * reschedule event.
1019 *
1020 * When we get rescheduled we reprogram the hrtick_timer outside of the
1021 * rq->lock.
1022 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001023
1024/*
1025 * Use hrtick when:
1026 * - enabled by features
1027 * - hrtimer is actually high res
1028 */
1029static inline int hrtick_enabled(struct rq *rq)
1030{
1031 if (!sched_feat(HRTICK))
1032 return 0;
Ingo Molnarba420592008-07-20 11:02:06 +02001033 if (!cpu_active(cpu_of(rq)))
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001034 return 0;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001035 return hrtimer_is_hres_active(&rq->hrtick_timer);
1036}
1037
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001038static void hrtick_clear(struct rq *rq)
1039{
1040 if (hrtimer_active(&rq->hrtick_timer))
1041 hrtimer_cancel(&rq->hrtick_timer);
1042}
1043
1044/*
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001045 * High-resolution timer tick.
1046 * Runs from hardirq context with interrupts disabled.
1047 */
1048static enum hrtimer_restart hrtick(struct hrtimer *timer)
1049{
1050 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1051
1052 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1053
1054 spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02001055 update_rq_clock(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001056 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1057 spin_unlock(&rq->lock);
1058
1059 return HRTIMER_NORESTART;
1060}
1061
Rabin Vincent95e904c2008-05-11 05:55:33 +05301062#ifdef CONFIG_SMP
Peter Zijlstra31656512008-07-18 18:01:23 +02001063/*
1064 * called from hardirq (IPI) context
1065 */
1066static void __hrtick_start(void *arg)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001067{
Peter Zijlstra31656512008-07-18 18:01:23 +02001068 struct rq *rq = arg;
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001069
Peter Zijlstra31656512008-07-18 18:01:23 +02001070 spin_lock(&rq->lock);
1071 hrtimer_restart(&rq->hrtick_timer);
1072 rq->hrtick_csd_pending = 0;
1073 spin_unlock(&rq->lock);
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001074}
1075
Peter Zijlstra31656512008-07-18 18:01:23 +02001076/*
1077 * Called to set the hrtick timer state.
1078 *
1079 * called with rq->lock held and irqs disabled
1080 */
1081static void hrtick_start(struct rq *rq, u64 delay)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001082{
Peter Zijlstra31656512008-07-18 18:01:23 +02001083 struct hrtimer *timer = &rq->hrtick_timer;
1084 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001085
Arjan van de Vencc584b22008-09-01 15:02:30 -07001086 hrtimer_set_expires(timer, time);
Peter Zijlstra31656512008-07-18 18:01:23 +02001087
1088 if (rq == this_rq()) {
1089 hrtimer_restart(timer);
1090 } else if (!rq->hrtick_csd_pending) {
Peter Zijlstra6e275632009-02-25 13:59:48 +01001091 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
Peter Zijlstra31656512008-07-18 18:01:23 +02001092 rq->hrtick_csd_pending = 1;
1093 }
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001094}
1095
1096static int
1097hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1098{
1099 int cpu = (int)(long)hcpu;
1100
1101 switch (action) {
1102 case CPU_UP_CANCELED:
1103 case CPU_UP_CANCELED_FROZEN:
1104 case CPU_DOWN_PREPARE:
1105 case CPU_DOWN_PREPARE_FROZEN:
1106 case CPU_DEAD:
1107 case CPU_DEAD_FROZEN:
Peter Zijlstra31656512008-07-18 18:01:23 +02001108 hrtick_clear(cpu_rq(cpu));
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001109 return NOTIFY_OK;
1110 }
1111
1112 return NOTIFY_DONE;
1113}
1114
Rakib Mullickfa748202008-09-22 14:55:45 -07001115static __init void init_hrtick(void)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001116{
1117 hotcpu_notifier(hotplug_hrtick, 0);
1118}
Peter Zijlstra31656512008-07-18 18:01:23 +02001119#else
1120/*
1121 * Called to set the hrtick timer state.
1122 *
1123 * called with rq->lock held and irqs disabled
1124 */
1125static void hrtick_start(struct rq *rq, u64 delay)
1126{
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +01001127 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
Arun R Bharadwaj5c333862009-04-16 12:14:37 +05301128 HRTIMER_MODE_REL_PINNED, 0);
Peter Zijlstra31656512008-07-18 18:01:23 +02001129}
1130
Andrew Morton006c75f2008-09-22 14:55:46 -07001131static inline void init_hrtick(void)
Peter Zijlstra31656512008-07-18 18:01:23 +02001132{
1133}
Rabin Vincent95e904c2008-05-11 05:55:33 +05301134#endif /* CONFIG_SMP */
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001135
1136static void init_rq_hrtick(struct rq *rq)
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001137{
Peter Zijlstra31656512008-07-18 18:01:23 +02001138#ifdef CONFIG_SMP
1139 rq->hrtick_csd_pending = 0;
1140
1141 rq->hrtick_csd.flags = 0;
1142 rq->hrtick_csd.func = __hrtick_start;
1143 rq->hrtick_csd.info = rq;
1144#endif
1145
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001146 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1147 rq->hrtick_timer.function = hrtick;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001148}
Andrew Morton006c75f2008-09-22 14:55:46 -07001149#else /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001150static inline void hrtick_clear(struct rq *rq)
1151{
1152}
1153
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001154static inline void init_rq_hrtick(struct rq *rq)
1155{
1156}
1157
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001158static inline void init_hrtick(void)
1159{
1160}
Andrew Morton006c75f2008-09-22 14:55:46 -07001161#endif /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001162
Ingo Molnar1b9f19c2007-07-09 18:51:59 +02001163/*
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001164 * resched_task - mark a task 'to be rescheduled now'.
1165 *
1166 * On UP this means the setting of the need_resched flag, on SMP it
1167 * might also involve a cross-CPU call to trigger the scheduler on
1168 * the target CPU.
1169 */
1170#ifdef CONFIG_SMP
1171
1172#ifndef tsk_is_polling
1173#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1174#endif
1175
Peter Zijlstra31656512008-07-18 18:01:23 +02001176static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001177{
1178 int cpu;
1179
1180 assert_spin_locked(&task_rq(p)->lock);
1181
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001182 if (test_tsk_need_resched(p))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001183 return;
1184
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001185 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001186
1187 cpu = task_cpu(p);
1188 if (cpu == smp_processor_id())
1189 return;
1190
1191 /* NEED_RESCHED must be visible before we test polling */
1192 smp_mb();
1193 if (!tsk_is_polling(p))
1194 smp_send_reschedule(cpu);
1195}
1196
1197static void resched_cpu(int cpu)
1198{
1199 struct rq *rq = cpu_rq(cpu);
1200 unsigned long flags;
1201
1202 if (!spin_trylock_irqsave(&rq->lock, flags))
1203 return;
1204 resched_task(cpu_curr(cpu));
1205 spin_unlock_irqrestore(&rq->lock, flags);
1206}
Thomas Gleixner06d83082008-03-22 09:20:24 +01001207
1208#ifdef CONFIG_NO_HZ
1209/*
1210 * When add_timer_on() enqueues a timer into the timer wheel of an
1211 * idle CPU then this timer might expire before the next timer event
1212 * which is scheduled to wake up that CPU. In case of a completely
1213 * idle system the next event might even be infinite time into the
1214 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1215 * leaves the inner idle loop so the newly added timer is taken into
1216 * account when the CPU goes back to idle and evaluates the timer
1217 * wheel for the next timer event.
1218 */
1219void wake_up_idle_cpu(int cpu)
1220{
1221 struct rq *rq = cpu_rq(cpu);
1222
1223 if (cpu == smp_processor_id())
1224 return;
1225
1226 /*
1227 * This is safe, as this function is called with the timer
1228 * wheel base lock of (cpu) held. When the CPU is on the way
1229 * to idle and has not yet set rq->curr to idle then it will
1230 * be serialized on the timer wheel base lock and take the new
1231 * timer into account automatically.
1232 */
1233 if (rq->curr != rq->idle)
1234 return;
1235
1236 /*
1237 * We can set TIF_RESCHED on the idle task of the other CPU
1238 * lockless. The worst case is that the other CPU runs the
1239 * idle task through an additional NOOP schedule()
1240 */
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001241 set_tsk_need_resched(rq->idle);
Thomas Gleixner06d83082008-03-22 09:20:24 +01001242
1243 /* NEED_RESCHED must be visible before we test polling */
1244 smp_mb();
1245 if (!tsk_is_polling(rq->idle))
1246 smp_send_reschedule(cpu);
1247}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001248#endif /* CONFIG_NO_HZ */
Thomas Gleixner06d83082008-03-22 09:20:24 +01001249
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001250static u64 sched_avg_period(void)
1251{
1252 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1253}
1254
1255static void sched_avg_update(struct rq *rq)
1256{
1257 s64 period = sched_avg_period();
1258
1259 while ((s64)(rq->clock - rq->age_stamp) > period) {
1260 rq->age_stamp += period;
1261 rq->rt_avg /= 2;
1262 }
1263}
1264
1265static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1266{
1267 rq->rt_avg += rt_delta;
1268 sched_avg_update(rq);
1269}
1270
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001271#else /* !CONFIG_SMP */
Peter Zijlstra31656512008-07-18 18:01:23 +02001272static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001273{
1274 assert_spin_locked(&task_rq(p)->lock);
Peter Zijlstra31656512008-07-18 18:01:23 +02001275 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001276}
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001277
1278static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1279{
1280}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001281#endif /* CONFIG_SMP */
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001282
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001283#if BITS_PER_LONG == 32
1284# define WMULT_CONST (~0UL)
1285#else
1286# define WMULT_CONST (1UL << 32)
1287#endif
1288
1289#define WMULT_SHIFT 32
1290
Ingo Molnar194081e2007-08-09 11:16:51 +02001291/*
1292 * Shift right and round:
1293 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001294#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
Ingo Molnar194081e2007-08-09 11:16:51 +02001295
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001296/*
1297 * delta *= weight / lw
1298 */
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001299static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001300calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1301 struct load_weight *lw)
1302{
1303 u64 tmp;
1304
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001305 if (!lw->inv_weight) {
1306 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1307 lw->inv_weight = 1;
1308 else
1309 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1310 / (lw->weight+1);
1311 }
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001312
1313 tmp = (u64)delta_exec * weight;
1314 /*
1315 * Check whether we'd overflow the 64-bit multiplication:
1316 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001317 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001318 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001319 WMULT_SHIFT/2);
1320 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001321 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001322
Ingo Molnarecf691d2007-08-02 17:41:40 +02001323 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001324}
1325
Ingo Molnar10919852007-10-15 17:00:04 +02001326static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001327{
1328 lw->weight += inc;
Ingo Molnare89996a2008-03-14 23:48:28 +01001329 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001330}
1331
Ingo Molnar10919852007-10-15 17:00:04 +02001332static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001333{
1334 lw->weight -= dec;
Ingo Molnare89996a2008-03-14 23:48:28 +01001335 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001336}
1337
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001339 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1340 * of tasks with abnormal "nice" values across CPUs the contribution that
1341 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001342 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001343 * scaled version of the new time slice allocation that they receive on time
1344 * slice expiry etc.
1345 */
1346
Peter Zijlstracce7ade2009-01-15 14:53:37 +01001347#define WEIGHT_IDLEPRIO 3
1348#define WMULT_IDLEPRIO 1431655765
Ingo Molnardd41f592007-07-09 18:51:59 +02001349
1350/*
1351 * Nice levels are multiplicative, with a gentle 10% change for every
1352 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1353 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1354 * that remained on nice 0.
1355 *
1356 * The "10% effect" is relative and cumulative: from _any_ nice level,
1357 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001358 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1359 * If a task goes up by ~10% and another task goes down by ~10% then
1360 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001361 */
1362static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001363 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1364 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1365 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1366 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1367 /* 0 */ 1024, 820, 655, 526, 423,
1368 /* 5 */ 335, 272, 215, 172, 137,
1369 /* 10 */ 110, 87, 70, 56, 45,
1370 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001371};
1372
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001373/*
1374 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1375 *
1376 * In cases where the weight does not change often, we can use the
1377 * precalculated inverse to speed up arithmetics by turning divisions
1378 * into multiplications:
1379 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001380static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001381 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1382 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1383 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1384 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1385 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1386 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1387 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1388 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001389};
Peter Williams2dd73a42006-06-27 02:54:34 -07001390
Ingo Molnardd41f592007-07-09 18:51:59 +02001391static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
1392
1393/*
1394 * runqueue iterator, to support SMP load-balancing between different
1395 * scheduling classes, without having to expose their internal data
1396 * structures to the load-balancing proper:
1397 */
1398struct rq_iterator {
1399 void *arg;
1400 struct task_struct *(*start)(void *);
1401 struct task_struct *(*next)(void *);
1402};
1403
Peter Williamse1d14842007-10-24 18:23:51 +02001404#ifdef CONFIG_SMP
1405static unsigned long
1406balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1407 unsigned long max_load_move, struct sched_domain *sd,
1408 enum cpu_idle_type idle, int *all_pinned,
1409 int *this_best_prio, struct rq_iterator *iterator);
1410
1411static int
1412iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1413 struct sched_domain *sd, enum cpu_idle_type idle,
1414 struct rq_iterator *iterator);
Peter Williamse1d14842007-10-24 18:23:51 +02001415#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02001416
Bharata B Raoef12fef2009-03-31 10:02:22 +05301417/* Time spent by the tasks of the cpu accounting group executing in ... */
1418enum cpuacct_stat_index {
1419 CPUACCT_STAT_USER, /* ... user mode */
1420 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1421
1422 CPUACCT_STAT_NSTATS,
1423};
1424
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001425#ifdef CONFIG_CGROUP_CPUACCT
1426static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
Bharata B Raoef12fef2009-03-31 10:02:22 +05301427static void cpuacct_update_stats(struct task_struct *tsk,
1428 enum cpuacct_stat_index idx, cputime_t val);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001429#else
1430static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
Bharata B Raoef12fef2009-03-31 10:02:22 +05301431static inline void cpuacct_update_stats(struct task_struct *tsk,
1432 enum cpuacct_stat_index idx, cputime_t val) {}
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001433#endif
1434
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001435static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1436{
1437 update_load_add(&rq->load, load);
1438}
1439
1440static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1441{
1442 update_load_sub(&rq->load, load);
1443}
1444
Ingo Molnar7940ca32008-08-19 13:40:47 +02001445#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
Peter Zijlstraeb755802008-08-19 12:33:05 +02001446typedef int (*tg_visitor)(struct task_group *, void *);
1447
1448/*
1449 * Iterate the full tree, calling @down when first entering a node and @up when
1450 * leaving it for the final time.
1451 */
1452static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1453{
1454 struct task_group *parent, *child;
1455 int ret;
1456
1457 rcu_read_lock();
1458 parent = &root_task_group;
1459down:
1460 ret = (*down)(parent, data);
1461 if (ret)
1462 goto out_unlock;
1463 list_for_each_entry_rcu(child, &parent->children, siblings) {
1464 parent = child;
1465 goto down;
1466
1467up:
1468 continue;
1469 }
1470 ret = (*up)(parent, data);
1471 if (ret)
1472 goto out_unlock;
1473
1474 child = parent;
1475 parent = parent->parent;
1476 if (parent)
1477 goto up;
1478out_unlock:
1479 rcu_read_unlock();
1480
1481 return ret;
1482}
1483
1484static int tg_nop(struct task_group *tg, void *data)
1485{
1486 return 0;
1487}
1488#endif
1489
Gregory Haskinse7693a32008-01-25 21:08:09 +01001490#ifdef CONFIG_SMP
Peter Zijlstraf5f08f32009-09-10 13:35:28 +02001491/* Used instead of source_load when we know the type == 0 */
1492static unsigned long weighted_cpuload(const int cpu)
1493{
1494 return cpu_rq(cpu)->load.weight;
1495}
1496
1497/*
1498 * Return a low guess at the load of a migration-source cpu weighted
1499 * according to the scheduling class and "nice" value.
1500 *
1501 * We want to under-estimate the load of migration sources, to
1502 * balance conservatively.
1503 */
1504static unsigned long source_load(int cpu, int type)
1505{
1506 struct rq *rq = cpu_rq(cpu);
1507 unsigned long total = weighted_cpuload(cpu);
1508
1509 if (type == 0 || !sched_feat(LB_BIAS))
1510 return total;
1511
1512 return min(rq->cpu_load[type-1], total);
1513}
1514
1515/*
1516 * Return a high guess at the load of a migration-target cpu weighted
1517 * according to the scheduling class and "nice" value.
1518 */
1519static unsigned long target_load(int cpu, int type)
1520{
1521 struct rq *rq = cpu_rq(cpu);
1522 unsigned long total = weighted_cpuload(cpu);
1523
1524 if (type == 0 || !sched_feat(LB_BIAS))
1525 return total;
1526
1527 return max(rq->cpu_load[type-1], total);
1528}
1529
Peter Zijlstraae154be2009-09-10 14:40:57 +02001530static struct sched_group *group_of(int cpu)
1531{
1532 struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
1533
1534 if (!sd)
1535 return NULL;
1536
1537 return sd->groups;
1538}
1539
1540static unsigned long power_of(int cpu)
1541{
1542 struct sched_group *group = group_of(cpu);
1543
1544 if (!group)
1545 return SCHED_LOAD_SCALE;
1546
1547 return group->cpu_power;
1548}
1549
Gregory Haskinse7693a32008-01-25 21:08:09 +01001550static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001551
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001552static unsigned long cpu_avg_load_per_task(int cpu)
1553{
1554 struct rq *rq = cpu_rq(cpu);
Ingo Molnaraf6d5962008-11-29 20:45:15 +01001555 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001556
Steven Rostedt4cd42622008-11-26 21:04:24 -05001557 if (nr_running)
1558 rq->avg_load_per_task = rq->load.weight / nr_running;
Balbir Singha2d47772008-11-12 16:19:00 +05301559 else
1560 rq->avg_load_per_task = 0;
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001561
1562 return rq->avg_load_per_task;
1563}
1564
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001565#ifdef CONFIG_FAIR_GROUP_SCHED
1566
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001567struct update_shares_data {
1568 unsigned long rq_weight[NR_CPUS];
1569};
1570
1571static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
1572
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001573static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1574
1575/*
1576 * Calculate and set the cpu's group shares.
1577 */
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001578static void update_group_shares_cpu(struct task_group *tg, int cpu,
1579 unsigned long sd_shares,
1580 unsigned long sd_rq_weight,
1581 struct update_shares_data *usd)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001582{
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001583 unsigned long shares, rq_weight;
Peter Zijlstraa5004272009-07-27 14:04:49 +02001584 int boost = 0;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001585
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001586 rq_weight = usd->rq_weight[cpu];
Peter Zijlstraa5004272009-07-27 14:04:49 +02001587 if (!rq_weight) {
1588 boost = 1;
1589 rq_weight = NICE_0_LOAD;
1590 }
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001591
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001592 /*
Peter Zijlstraa8af7242009-08-21 13:58:54 +02001593 * \Sum_j shares_j * rq_weight_i
1594 * shares_i = -----------------------------
1595 * \Sum_j rq_weight_j
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001596 */
Ken Chenec4e0e22008-11-18 22:41:57 -08001597 shares = (sd_shares * rq_weight) / sd_rq_weight;
Peter Zijlstraffda12a2008-10-17 19:27:02 +02001598 shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001599
Peter Zijlstraffda12a2008-10-17 19:27:02 +02001600 if (abs(shares - tg->se[cpu]->load.weight) >
1601 sysctl_sched_shares_thresh) {
1602 struct rq *rq = cpu_rq(cpu);
1603 unsigned long flags;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001604
Peter Zijlstraffda12a2008-10-17 19:27:02 +02001605 spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001606 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
Peter Zijlstraa5004272009-07-27 14:04:49 +02001607 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
Peter Zijlstraffda12a2008-10-17 19:27:02 +02001608 __set_se_shares(tg->se[cpu], shares);
1609 spin_unlock_irqrestore(&rq->lock, flags);
1610 }
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001611}
1612
1613/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001614 * Re-compute the task group their per cpu shares over the given domain.
1615 * This needs to be done in a bottom-up fashion because the rq weight of a
1616 * parent group depends on the shares of its child groups.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001617 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001618static int tg_shares_up(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001619{
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001620 unsigned long weight, rq_weight = 0, shares = 0;
1621 struct update_shares_data *usd;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001622 struct sched_domain *sd = data;
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001623 unsigned long flags;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001624 int i;
1625
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001626 if (!tg->se[0])
1627 return 0;
1628
1629 local_irq_save(flags);
1630 usd = &__get_cpu_var(update_shares_data);
1631
Rusty Russell758b2cd2008-11-25 02:35:04 +10301632 for_each_cpu(i, sched_domain_span(sd)) {
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001633 weight = tg->cfs_rq[i]->load.weight;
1634 usd->rq_weight[i] = weight;
1635
Ken Chenec4e0e22008-11-18 22:41:57 -08001636 /*
1637 * If there are currently no tasks on the cpu pretend there
1638 * is one of average load so that when a new task gets to
1639 * run here it will not get delayed by group starvation.
1640 */
Ken Chenec4e0e22008-11-18 22:41:57 -08001641 if (!weight)
1642 weight = NICE_0_LOAD;
1643
Ken Chenec4e0e22008-11-18 22:41:57 -08001644 rq_weight += weight;
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001645 shares += tg->cfs_rq[i]->shares;
1646 }
1647
1648 if ((!shares && rq_weight) || shares > tg->shares)
1649 shares = tg->shares;
1650
1651 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
1652 shares = tg->shares;
1653
Rusty Russell758b2cd2008-11-25 02:35:04 +10301654 for_each_cpu(i, sched_domain_span(sd))
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001655 update_group_shares_cpu(tg, i, shares, rq_weight, usd);
1656
1657 local_irq_restore(flags);
Peter Zijlstraeb755802008-08-19 12:33:05 +02001658
1659 return 0;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001660}
1661
1662/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001663 * Compute the cpu's hierarchical load factor for each task group.
1664 * This needs to be done in a top-down fashion because the load of a child
1665 * group is a fraction of its parents load.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001666 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001667static int tg_load_down(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001668{
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001669 unsigned long load;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001670 long cpu = (long)data;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001671
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001672 if (!tg->parent) {
1673 load = cpu_rq(cpu)->load.weight;
1674 } else {
1675 load = tg->parent->cfs_rq[cpu]->h_load;
1676 load *= tg->cfs_rq[cpu]->shares;
1677 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1678 }
1679
1680 tg->cfs_rq[cpu]->h_load = load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001681
Peter Zijlstraeb755802008-08-19 12:33:05 +02001682 return 0;
Peter Zijlstra4d8d5952008-06-27 13:41:19 +02001683}
1684
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001685static void update_shares(struct sched_domain *sd)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001686{
Peter Zijlstrae7097152009-06-03 15:41:20 +02001687 s64 elapsed;
1688 u64 now;
1689
1690 if (root_task_group_empty())
1691 return;
1692
1693 now = cpu_clock(raw_smp_processor_id());
1694 elapsed = now - sd->last_update;
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02001695
1696 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
1697 sd->last_update = now;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001698 walk_tg_tree(tg_nop, tg_shares_up, sd);
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02001699 }
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001700}
1701
Peter Zijlstra3e5459b2008-06-27 13:41:24 +02001702static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1703{
Peter Zijlstrae7097152009-06-03 15:41:20 +02001704 if (root_task_group_empty())
1705 return;
1706
Peter Zijlstra3e5459b2008-06-27 13:41:24 +02001707 spin_unlock(&rq->lock);
1708 update_shares(sd);
1709 spin_lock(&rq->lock);
1710}
1711
Peter Zijlstraeb755802008-08-19 12:33:05 +02001712static void update_h_load(long cpu)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001713{
Peter Zijlstrae7097152009-06-03 15:41:20 +02001714 if (root_task_group_empty())
1715 return;
1716
Peter Zijlstraeb755802008-08-19 12:33:05 +02001717 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001718}
1719
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001720#else
1721
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001722static inline void update_shares(struct sched_domain *sd)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001723{
1724}
1725
Peter Zijlstra3e5459b2008-06-27 13:41:24 +02001726static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1727{
1728}
1729
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001730#endif
1731
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001732#ifdef CONFIG_PREEMPT
1733
Peter Zijlstrab78bb862009-09-15 14:23:18 +02001734static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1735
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001736/*
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001737 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1738 * way at the expense of forcing extra atomic operations in all
1739 * invocations. This assures that the double_lock is acquired using the
1740 * same underlying policy as the spinlock_t on this architecture, which
1741 * reduces latency compared to the unfair variant below. However, it
1742 * also adds more overhead and therefore may reduce throughput.
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001743 */
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001744static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1745 __releases(this_rq->lock)
1746 __acquires(busiest->lock)
1747 __acquires(this_rq->lock)
1748{
1749 spin_unlock(&this_rq->lock);
1750 double_rq_lock(this_rq, busiest);
1751
1752 return 1;
1753}
1754
1755#else
1756/*
1757 * Unfair double_lock_balance: Optimizes throughput at the expense of
1758 * latency by eliminating extra atomic operations when the locks are
1759 * already in proper order on entry. This favors lower cpu-ids and will
1760 * grant the double lock to lower cpus over higher ids under contention,
1761 * regardless of entry order into the function.
1762 */
1763static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001764 __releases(this_rq->lock)
1765 __acquires(busiest->lock)
1766 __acquires(this_rq->lock)
1767{
1768 int ret = 0;
1769
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001770 if (unlikely(!spin_trylock(&busiest->lock))) {
1771 if (busiest < this_rq) {
1772 spin_unlock(&this_rq->lock);
1773 spin_lock(&busiest->lock);
1774 spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
1775 ret = 1;
1776 } else
1777 spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
1778 }
1779 return ret;
1780}
1781
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001782#endif /* CONFIG_PREEMPT */
1783
1784/*
1785 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1786 */
1787static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1788{
1789 if (unlikely(!irqs_disabled())) {
1790 /* printk() doesn't work good under rq->lock */
1791 spin_unlock(&this_rq->lock);
1792 BUG_ON(1);
1793 }
1794
1795 return _double_lock_balance(this_rq, busiest);
1796}
1797
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001798static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1799 __releases(busiest->lock)
1800{
1801 spin_unlock(&busiest->lock);
1802 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1803}
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001804#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001805
1806#ifdef CONFIG_FAIR_GROUP_SCHED
1807static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1808{
Vegard Nossum30432092008-06-27 21:35:50 +02001809#ifdef CONFIG_SMP
Ingo Molnar34e83e82008-06-27 15:42:36 +02001810 cfs_rq->shares = shares;
1811#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001812}
1813#endif
1814
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001815static void calc_load_account_active(struct rq *this_rq);
1816
Ingo Molnardd41f592007-07-09 18:51:59 +02001817#include "sched_stats.h"
Ingo Molnardd41f592007-07-09 18:51:59 +02001818#include "sched_idletask.c"
Ingo Molnar5522d5d2007-10-15 17:00:12 +02001819#include "sched_fair.c"
1820#include "sched_rt.c"
Ingo Molnardd41f592007-07-09 18:51:59 +02001821#ifdef CONFIG_SCHED_DEBUG
1822# include "sched_debug.c"
1823#endif
1824
1825#define sched_class_highest (&rt_sched_class)
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001826#define for_each_class(class) \
1827 for (class = sched_class_highest; class; class = class->next)
Ingo Molnardd41f592007-07-09 18:51:59 +02001828
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001829static void inc_nr_running(struct rq *rq)
Ingo Molnar6363ca52008-05-29 11:28:57 +02001830{
1831 rq->nr_running++;
Ingo Molnar6363ca52008-05-29 11:28:57 +02001832}
1833
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001834static void dec_nr_running(struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001835{
1836 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001837}
1838
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001839static void set_load_weight(struct task_struct *p)
1840{
1841 if (task_has_rt_policy(p)) {
Ingo Molnardd41f592007-07-09 18:51:59 +02001842 p->se.load.weight = prio_to_weight[0] * 2;
1843 p->se.load.inv_weight = prio_to_wmult[0] >> 1;
1844 return;
1845 }
1846
1847 /*
1848 * SCHED_IDLE tasks get minimal weight:
1849 */
1850 if (p->policy == SCHED_IDLE) {
1851 p->se.load.weight = WEIGHT_IDLEPRIO;
1852 p->se.load.inv_weight = WMULT_IDLEPRIO;
1853 return;
1854 }
1855
1856 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1857 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001858}
1859
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001860static void update_avg(u64 *avg, u64 sample)
1861{
1862 s64 diff = sample - *avg;
1863 *avg += diff >> 3;
1864}
1865
Ingo Molnar8159f872007-08-09 11:16:49 +02001866static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001867{
Peter Zijlstra831451a2009-01-14 12:39:18 +01001868 if (wakeup)
1869 p->se.start_runtime = p->se.sum_exec_runtime;
1870
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001871 sched_info_queued(p);
Ingo Molnarfd390f62007-08-09 11:16:48 +02001872 p->sched_class->enqueue_task(rq, p, wakeup);
Ingo Molnardd41f592007-07-09 18:51:59 +02001873 p->se.on_rq = 1;
1874}
1875
Ingo Molnar69be72c2007-08-09 11:16:49 +02001876static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
Ingo Molnardd41f592007-07-09 18:51:59 +02001877{
Peter Zijlstra831451a2009-01-14 12:39:18 +01001878 if (sleep) {
1879 if (p->se.last_wakeup) {
1880 update_avg(&p->se.avg_overlap,
1881 p->se.sum_exec_runtime - p->se.last_wakeup);
1882 p->se.last_wakeup = 0;
1883 } else {
1884 update_avg(&p->se.avg_wakeup,
1885 sysctl_sched_wakeup_granularity);
1886 }
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001887 }
1888
Ankita Garg46ac22b2008-07-01 14:30:06 +05301889 sched_info_dequeued(p);
Ingo Molnarf02231e2007-08-09 11:16:48 +02001890 p->sched_class->dequeue_task(rq, p, sleep);
Ingo Molnardd41f592007-07-09 18:51:59 +02001891 p->se.on_rq = 0;
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001892}
1893
1894/*
Ingo Molnardd41f592007-07-09 18:51:59 +02001895 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001896 */
Ingo Molnar14531182007-07-09 18:51:59 +02001897static inline int __normal_prio(struct task_struct *p)
1898{
Ingo Molnardd41f592007-07-09 18:51:59 +02001899 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02001900}
1901
1902/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07001903 * Calculate the expected normal priority: i.e. priority
1904 * without taking RT-inheritance into account. Might be
1905 * boosted by interactivity modifiers. Changes upon fork,
1906 * setprio syscalls, and whenever the interactivity
1907 * estimator recalculates.
1908 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001909static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07001910{
1911 int prio;
1912
Ingo Molnare05606d2007-07-09 18:51:59 +02001913 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07001914 prio = MAX_RT_PRIO-1 - p->rt_priority;
1915 else
1916 prio = __normal_prio(p);
1917 return prio;
1918}
1919
1920/*
1921 * Calculate the current priority, i.e. the priority
1922 * taken into account by the scheduler. This value might
1923 * be boosted by RT tasks, or might be boosted by
1924 * interactivity modifiers. Will be RT if the task got
1925 * RT-boosted. If not then it returns p->normal_prio.
1926 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001927static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07001928{
1929 p->normal_prio = normal_prio(p);
1930 /*
1931 * If we are RT tasks or we were boosted to RT priority,
1932 * keep the priority unchanged. Otherwise, update priority
1933 * to the normal priority:
1934 */
1935 if (!rt_prio(p->prio))
1936 return p->normal_prio;
1937 return p->prio;
1938}
1939
1940/*
Ingo Molnardd41f592007-07-09 18:51:59 +02001941 * activate_task - move a task to the runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001943static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05001945 if (task_contributes_to_load(p))
Ingo Molnardd41f592007-07-09 18:51:59 +02001946 rq->nr_uninterruptible--;
1947
Ingo Molnar8159f872007-08-09 11:16:49 +02001948 enqueue_task(rq, p, wakeup);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001949 inc_nr_running(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950}
1951
1952/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 * deactivate_task - remove a task from the runqueue.
1954 */
Ingo Molnar2e1cb742007-08-09 11:16:49 +02001955static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05001957 if (task_contributes_to_load(p))
Ingo Molnardd41f592007-07-09 18:51:59 +02001958 rq->nr_uninterruptible++;
1959
Ingo Molnar69be72c2007-08-09 11:16:49 +02001960 dequeue_task(rq, p, sleep);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001961 dec_nr_running(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962}
1963
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964/**
1965 * task_curr - is this task currently executing on a CPU?
1966 * @p: the task in question.
1967 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001968inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969{
1970 return cpu_curr(task_cpu(p)) == p;
1971}
1972
Ingo Molnardd41f592007-07-09 18:51:59 +02001973static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1974{
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001975 set_task_rq(p, cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02001976#ifdef CONFIG_SMP
Dmitry Adamushkoce96b5a2007-11-15 20:57:40 +01001977 /*
1978 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1979 * successfuly executed on another CPU. We must ensure that updates of
1980 * per-task data have been completed by this moment.
1981 */
1982 smp_wmb();
Ingo Molnardd41f592007-07-09 18:51:59 +02001983 task_thread_info(p)->cpu = cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02001984#endif
Peter Williams2dd73a42006-06-27 02:54:34 -07001985}
1986
Steven Rostedtcb469842008-01-25 21:08:22 +01001987static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1988 const struct sched_class *prev_class,
1989 int oldprio, int running)
1990{
1991 if (prev_class != p->sched_class) {
1992 if (prev_class->switched_from)
1993 prev_class->switched_from(rq, p, running);
1994 p->sched_class->switched_to(rq, p, running);
1995 } else
1996 p->sched_class->prio_changed(rq, p, oldprio, running);
1997}
1998
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999#ifdef CONFIG_SMP
Ingo Molnarcc367732007-10-15 17:00:18 +02002000/*
2001 * Is this task likely cache-hot:
2002 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002003static int
Ingo Molnarcc367732007-10-15 17:00:18 +02002004task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2005{
2006 s64 delta;
2007
Ingo Molnarf540a602008-03-15 17:10:34 +01002008 /*
2009 * Buddy candidates are cache hot:
2010 */
Peter Zijlstra47932412008-11-04 21:25:09 +01002011 if (sched_feat(CACHE_HOT_BUDDY) &&
2012 (&p->se == cfs_rq_of(&p->se)->next ||
2013 &p->se == cfs_rq_of(&p->se)->last))
Ingo Molnarf540a602008-03-15 17:10:34 +01002014 return 1;
2015
Ingo Molnarcc367732007-10-15 17:00:18 +02002016 if (p->sched_class != &fair_sched_class)
2017 return 0;
2018
Ingo Molnar6bc16652007-10-15 17:00:18 +02002019 if (sysctl_sched_migration_cost == -1)
2020 return 1;
2021 if (sysctl_sched_migration_cost == 0)
2022 return 0;
2023
Ingo Molnarcc367732007-10-15 17:00:18 +02002024 delta = now - p->se.exec_start;
2025
2026 return delta < (s64)sysctl_sched_migration_cost;
2027}
2028
2029
Ingo Molnardd41f592007-07-09 18:51:59 +02002030void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02002031{
Ingo Molnardd41f592007-07-09 18:51:59 +02002032 int old_cpu = task_cpu(p);
2033 struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
Srivatsa Vaddagiri2830cf82007-10-15 17:00:12 +02002034 struct cfs_rq *old_cfsrq = task_cfs_rq(p),
2035 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
Ingo Molnarbbdba7c2007-10-15 17:00:06 +02002036 u64 clock_offset;
Ingo Molnardd41f592007-07-09 18:51:59 +02002037
2038 clock_offset = old_rq->clock - new_rq->clock;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002039
Mathieu Desnoyersde1d7282009-05-05 16:49:59 +08002040 trace_sched_migrate_task(p, new_cpu);
Peter Zijlstracbc34ed2008-12-10 08:08:22 +01002041
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002042#ifdef CONFIG_SCHEDSTATS
2043 if (p->se.wait_start)
2044 p->se.wait_start -= clock_offset;
Ingo Molnardd41f592007-07-09 18:51:59 +02002045 if (p->se.sleep_start)
2046 p->se.sleep_start -= clock_offset;
2047 if (p->se.block_start)
2048 p->se.block_start -= clock_offset;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002049#endif
Ingo Molnarcc367732007-10-15 17:00:18 +02002050 if (old_cpu != new_cpu) {
Ingo Molnar6c594c22008-12-14 12:34:15 +01002051 p->se.nr_migrations++;
2052#ifdef CONFIG_SCHEDSTATS
Ingo Molnarcc367732007-10-15 17:00:18 +02002053 if (task_hot(p, old_rq->clock, NULL))
2054 schedstat_inc(p, se.nr_forced2_migrations);
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002055#endif
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002056 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
Peter Zijlstrae5289d42009-06-19 13:22:51 +02002057 1, 1, NULL, 0);
Ingo Molnar6c594c22008-12-14 12:34:15 +01002058 }
Srivatsa Vaddagiri2830cf82007-10-15 17:00:12 +02002059 p->se.vruntime -= old_cfsrq->min_vruntime -
2060 new_cfsrq->min_vruntime;
Ingo Molnardd41f592007-07-09 18:51:59 +02002061
2062 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02002063}
2064
Ingo Molnar70b97a72006-07-03 00:25:42 -07002065struct migration_req {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
Ingo Molnar36c8b582006-07-03 00:25:41 -07002068 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 int dest_cpu;
2070
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 struct completion done;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002072};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
2074/*
2075 * The task's runqueue lock must be held.
2076 * Returns true if you have to wait for migration thread.
2077 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002078static int
Ingo Molnar70b97a72006-07-03 00:25:42 -07002079migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002081 struct rq *rq = task_rq(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
2083 /*
2084 * If the task is not on a runqueue (and not running), then
2085 * it is sufficient to simply update the task's cpu field.
2086 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002087 if (!p->se.on_rq && !task_running(rq, p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 set_task_cpu(p, dest_cpu);
2089 return 0;
2090 }
2091
2092 init_completion(&req->done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 req->task = p;
2094 req->dest_cpu = dest_cpu;
2095 list_add(&req->list, &rq->migration_queue);
Ingo Molnar48f24c42006-07-03 00:25:40 -07002096
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 return 1;
2098}
2099
2100/*
Markus Metzgera26b89f2009-04-03 16:43:34 +02002101 * wait_task_context_switch - wait for a thread to complete at least one
2102 * context switch.
2103 *
2104 * @p must not be current.
2105 */
2106void wait_task_context_switch(struct task_struct *p)
2107{
2108 unsigned long nvcsw, nivcsw, flags;
2109 int running;
2110 struct rq *rq;
2111
2112 nvcsw = p->nvcsw;
2113 nivcsw = p->nivcsw;
2114 for (;;) {
2115 /*
2116 * The runqueue is assigned before the actual context
2117 * switch. We need to take the runqueue lock.
2118 *
2119 * We could check initially without the lock but it is
2120 * very likely that we need to take the lock in every
2121 * iteration.
2122 */
2123 rq = task_rq_lock(p, &flags);
2124 running = task_running(rq, p);
2125 task_rq_unlock(rq, &flags);
2126
2127 if (likely(!running))
2128 break;
2129 /*
2130 * The switch count is incremented before the actual
2131 * context switch. We thus wait for two switches to be
2132 * sure at least one completed.
2133 */
2134 if ((p->nvcsw - nvcsw) > 1)
2135 break;
2136 if ((p->nivcsw - nivcsw) > 1)
2137 break;
2138
2139 cpu_relax();
2140 }
2141}
2142
2143/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 * wait_task_inactive - wait for a thread to unschedule.
2145 *
Roland McGrath85ba2d82008-07-25 19:45:58 -07002146 * If @match_state is nonzero, it's the @p->state value just checked and
2147 * not expected to change. If it changes, i.e. @p might have woken up,
2148 * then return zero. When we succeed in waiting for @p to be off its CPU,
2149 * we return a positive number (its total switch count). If a second call
2150 * a short while later returns the same number, the caller can be sure that
2151 * @p has remained unscheduled the whole time.
2152 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 * The caller must ensure that the task *will* unschedule sometime soon,
2154 * else this function might spin for a *long* time. This function can't
2155 * be called with interrupts off, or it may introduce deadlock with
2156 * smp_call_function() if an IPI is sent by the same process we are
2157 * waiting to become inactive.
2158 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002159unsigned long wait_task_inactive(struct task_struct *p, long match_state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160{
2161 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002162 int running, on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002163 unsigned long ncsw;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002164 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
Andi Kleen3a5c3592007-10-15 17:00:14 +02002166 for (;;) {
2167 /*
2168 * We do the initial early heuristics without holding
2169 * any task-queue locks at all. We'll only try to get
2170 * the runqueue lock when things look like they will
2171 * work out!
2172 */
2173 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002174
Andi Kleen3a5c3592007-10-15 17:00:14 +02002175 /*
2176 * If the task is actively running on another CPU
2177 * still, just relax and busy-wait without holding
2178 * any locks.
2179 *
2180 * NOTE! Since we don't hold any locks, it's not
2181 * even sure that "rq" stays as the right runqueue!
2182 * But we don't care, since "task_running()" will
2183 * return false if the runqueue has changed and p
2184 * is actually now running somewhere else!
2185 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002186 while (task_running(rq, p)) {
2187 if (match_state && unlikely(p->state != match_state))
2188 return 0;
Andi Kleen3a5c3592007-10-15 17:00:14 +02002189 cpu_relax();
Roland McGrath85ba2d82008-07-25 19:45:58 -07002190 }
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002191
Andi Kleen3a5c3592007-10-15 17:00:14 +02002192 /*
2193 * Ok, time to look more closely! We need the rq
2194 * lock now, to be *sure*. If we're wrong, we'll
2195 * just go back and repeat.
2196 */
2197 rq = task_rq_lock(p, &flags);
Mathieu Desnoyers0a16b602008-07-18 12:16:17 -04002198 trace_sched_wait_task(rq, p);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002199 running = task_running(rq, p);
2200 on_rq = p->se.on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002201 ncsw = 0;
Oleg Nesterovf31e11d2008-08-20 16:54:44 -07002202 if (!match_state || p->state == match_state)
Oleg Nesterov93dcf552008-08-20 16:54:44 -07002203 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
Andi Kleen3a5c3592007-10-15 17:00:14 +02002204 task_rq_unlock(rq, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002205
Andi Kleen3a5c3592007-10-15 17:00:14 +02002206 /*
Roland McGrath85ba2d82008-07-25 19:45:58 -07002207 * If it changed from the expected state, bail out now.
2208 */
2209 if (unlikely(!ncsw))
2210 break;
2211
2212 /*
Andi Kleen3a5c3592007-10-15 17:00:14 +02002213 * Was it really running after all now that we
2214 * checked with the proper locks actually held?
2215 *
2216 * Oops. Go back and try again..
2217 */
2218 if (unlikely(running)) {
2219 cpu_relax();
2220 continue;
2221 }
2222
2223 /*
2224 * It's not enough that it's not actively running,
2225 * it must be off the runqueue _entirely_, and not
2226 * preempted!
2227 *
Luis Henriques80dd99b2009-03-16 19:58:09 +00002228 * So if it was still runnable (but just not actively
Andi Kleen3a5c3592007-10-15 17:00:14 +02002229 * running right now), it's preempted, and we should
2230 * yield - it could be a while.
2231 */
2232 if (unlikely(on_rq)) {
2233 schedule_timeout_uninterruptible(1);
2234 continue;
2235 }
2236
2237 /*
2238 * Ahh, all good. It wasn't running, and it wasn't
2239 * runnable, which means that it will never become
2240 * running in the future either. We're all done!
2241 */
2242 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 }
Roland McGrath85ba2d82008-07-25 19:45:58 -07002244
2245 return ncsw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246}
2247
2248/***
2249 * kick_process - kick a running thread to enter/exit the kernel
2250 * @p: the to-be-kicked thread
2251 *
2252 * Cause a process which is running on another CPU to enter
2253 * kernel-mode, without any delay. (to get signals handled.)
2254 *
2255 * NOTE: this function doesnt have to take the runqueue lock,
2256 * because all it wants to ensure is that the remote task enters
2257 * the kernel. If the IPI races and the task has been migrated
2258 * to another CPU then no harm is done and the purpose has been
2259 * achieved as well.
2260 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002261void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262{
2263 int cpu;
2264
2265 preempt_disable();
2266 cpu = task_cpu(p);
2267 if ((cpu != smp_processor_id()) && task_curr(p))
2268 smp_send_reschedule(cpu);
2269 preempt_enable();
2270}
Rusty Russellb43e3522009-06-12 22:27:00 -06002271EXPORT_SYMBOL_GPL(kick_process);
Nick Piggin476d1392005-06-25 14:57:29 -07002272#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273
Thomas Gleixner0793a612008-12-04 20:12:29 +01002274/**
2275 * task_oncpu_function_call - call a function on the cpu on which a task runs
2276 * @p: the task to evaluate
2277 * @func: the function to be called
2278 * @info: the function call argument
2279 *
2280 * Calls the function @func when the task is currently running. This might
2281 * be on the current CPU, which just calls the function directly
2282 */
2283void task_oncpu_function_call(struct task_struct *p,
2284 void (*func) (void *info), void *info)
2285{
2286 int cpu;
2287
2288 preempt_disable();
2289 cpu = task_cpu(p);
2290 if (task_curr(p))
2291 smp_call_function_single(cpu, func, info, 1);
2292 preempt_enable();
2293}
2294
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295/***
2296 * try_to_wake_up - wake up a thread
2297 * @p: the to-be-woken-up thread
2298 * @state: the mask of task states that can be woken
2299 * @sync: do a synchronous wakeup?
2300 *
2301 * Put it on the run-queue if it's not already there. The "current"
2302 * thread is always on the run-queue (except when the actual
2303 * re-schedule is in progress), and as such you're allowed to do
2304 * the simpler "current->state = TASK_RUNNING" to mark yourself
2305 * runnable without the overhead of this.
2306 *
2307 * returns failure only if the task is already active.
2308 */
Peter Zijlstra7d478722009-09-14 19:55:44 +02002309static int try_to_wake_up(struct task_struct *p, unsigned int state,
2310 int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311{
Ingo Molnarcc367732007-10-15 17:00:18 +02002312 int cpu, orig_cpu, this_cpu, success = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 unsigned long flags;
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002314 struct rq *rq, *orig_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315
Ingo Molnarb85d0662008-03-16 20:03:22 +01002316 if (!sched_feat(SYNC_WAKEUPS))
Peter Zijlstra7d478722009-09-14 19:55:44 +02002317 wake_flags &= ~WF_SYNC;
Ingo Molnarb85d0662008-03-16 20:03:22 +01002318
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002319 this_cpu = get_cpu();
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02002320
Linus Torvalds04e2f172008-02-23 18:05:03 -08002321 smp_wmb();
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002322 rq = orig_rq = task_rq_lock(p, &flags);
Mike Galbraith03e89e42008-12-16 08:45:30 +01002323 update_rq_clock(rq);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002324 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 goto out;
2326
Ingo Molnardd41f592007-07-09 18:51:59 +02002327 if (p->se.on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 goto out_running;
2329
2330 cpu = task_cpu(p);
Ingo Molnarcc367732007-10-15 17:00:18 +02002331 orig_cpu = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
2333#ifdef CONFIG_SMP
2334 if (unlikely(task_running(rq, p)))
2335 goto out_activate;
2336
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002337 /*
2338 * In order to handle concurrent wakeups and release the rq->lock
2339 * we put the task in TASK_WAKING state.
Ingo Molnareb240732009-09-16 21:09:13 +02002340 *
2341 * First fix up the nr_uninterruptible count:
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002342 */
Ingo Molnareb240732009-09-16 21:09:13 +02002343 if (task_contributes_to_load(p))
2344 rq->nr_uninterruptible--;
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002345 p->state = TASK_WAKING;
2346 task_rq_unlock(rq, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Peter Zijlstra7d478722009-09-14 19:55:44 +02002348 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002349 if (cpu != orig_cpu)
2350 set_task_cpu(p, cpu);
2351
2352 rq = task_rq_lock(p, &flags);
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002353
2354 if (rq != orig_rq)
2355 update_rq_clock(rq);
2356
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002357 WARN_ON(p->state != TASK_WAKING);
2358 cpu = task_cpu(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359
Gregory Haskinse7693a32008-01-25 21:08:09 +01002360#ifdef CONFIG_SCHEDSTATS
2361 schedstat_inc(rq, ttwu_count);
2362 if (cpu == this_cpu)
2363 schedstat_inc(rq, ttwu_local);
2364 else {
2365 struct sched_domain *sd;
2366 for_each_domain(this_cpu, sd) {
Rusty Russell758b2cd2008-11-25 02:35:04 +10302367 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Gregory Haskinse7693a32008-01-25 21:08:09 +01002368 schedstat_inc(sd, ttwu_wake_remote);
2369 break;
2370 }
2371 }
2372 }
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002373#endif /* CONFIG_SCHEDSTATS */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002374
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375out_activate:
2376#endif /* CONFIG_SMP */
Ingo Molnarcc367732007-10-15 17:00:18 +02002377 schedstat_inc(p, se.nr_wakeups);
Peter Zijlstra7d478722009-09-14 19:55:44 +02002378 if (wake_flags & WF_SYNC)
Ingo Molnarcc367732007-10-15 17:00:18 +02002379 schedstat_inc(p, se.nr_wakeups_sync);
2380 if (orig_cpu != cpu)
2381 schedstat_inc(p, se.nr_wakeups_migrate);
2382 if (cpu == this_cpu)
2383 schedstat_inc(p, se.nr_wakeups_local);
2384 else
2385 schedstat_inc(p, se.nr_wakeups_remote);
Ingo Molnardd41f592007-07-09 18:51:59 +02002386 activate_task(rq, p, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 success = 1;
2388
Peter Zijlstra831451a2009-01-14 12:39:18 +01002389 /*
2390 * Only attribute actual wakeups done by this task.
2391 */
2392 if (!in_interrupt()) {
2393 struct sched_entity *se = &current->se;
2394 u64 sample = se->sum_exec_runtime;
2395
2396 if (se->last_wakeup)
2397 sample -= se->last_wakeup;
2398 else
2399 sample -= se->start_runtime;
2400 update_avg(&se->avg_wakeup, sample);
2401
2402 se->last_wakeup = se->sum_exec_runtime;
2403 }
2404
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405out_running:
Peter Zijlstra468a15b2008-12-16 08:07:03 +01002406 trace_sched_wakeup(rq, p, success);
Peter Zijlstra7d478722009-09-14 19:55:44 +02002407 check_preempt_curr(rq, p, wake_flags);
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01002408
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 p->state = TASK_RUNNING;
Steven Rostedt9a897c52008-01-25 21:08:22 +01002410#ifdef CONFIG_SMP
2411 if (p->sched_class->task_wake_up)
2412 p->sched_class->task_wake_up(rq, p);
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01002413
2414 if (unlikely(rq->idle_stamp)) {
2415 u64 delta = rq->clock - rq->idle_stamp;
2416 u64 max = 2*sysctl_sched_migration_cost;
2417
2418 if (delta > max)
2419 rq->avg_idle = max;
2420 else
2421 update_avg(&rq->avg_idle, delta);
2422 rq->idle_stamp = 0;
2423 }
Steven Rostedt9a897c52008-01-25 21:08:22 +01002424#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425out:
2426 task_rq_unlock(rq, &flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002427 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428
2429 return success;
2430}
2431
David Howells50fa6102009-04-28 15:01:38 +01002432/**
2433 * wake_up_process - Wake up a specific process
2434 * @p: The process to be woken up.
2435 *
2436 * Attempt to wake up the nominated process and move it to the set of runnable
2437 * processes. Returns 1 if the process was woken up, 0 if it was already
2438 * running.
2439 *
2440 * It may be assumed that this function implies a write memory barrier before
2441 * changing the task state if and only if any tasks are woken up.
2442 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002443int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05002445 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447EXPORT_SYMBOL(wake_up_process);
2448
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002449int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450{
2451 return try_to_wake_up(p, state, 0);
2452}
2453
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454/*
2455 * Perform scheduler related setup for a newly forked process p.
2456 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02002457 *
2458 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002460static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461{
Ingo Molnardd41f592007-07-09 18:51:59 +02002462 p->se.exec_start = 0;
2463 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02002464 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002465 p->se.nr_migrations = 0;
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01002466 p->se.last_wakeup = 0;
2467 p->se.avg_overlap = 0;
Peter Zijlstra831451a2009-01-14 12:39:18 +01002468 p->se.start_runtime = 0;
2469 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
Peter Zijlstraad4b78b2009-09-16 12:31:31 +02002470 p->se.avg_running = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002471
2472#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi77935272009-07-09 13:57:20 +02002473 p->se.wait_start = 0;
2474 p->se.wait_max = 0;
2475 p->se.wait_count = 0;
2476 p->se.wait_sum = 0;
2477
2478 p->se.sleep_start = 0;
2479 p->se.sleep_max = 0;
2480 p->se.sum_sleep_runtime = 0;
2481
2482 p->se.block_start = 0;
2483 p->se.block_max = 0;
2484 p->se.exec_max = 0;
2485 p->se.slice_max = 0;
2486
2487 p->se.nr_migrations_cold = 0;
2488 p->se.nr_failed_migrations_affine = 0;
2489 p->se.nr_failed_migrations_running = 0;
2490 p->se.nr_failed_migrations_hot = 0;
2491 p->se.nr_forced_migrations = 0;
2492 p->se.nr_forced2_migrations = 0;
2493
2494 p->se.nr_wakeups = 0;
2495 p->se.nr_wakeups_sync = 0;
2496 p->se.nr_wakeups_migrate = 0;
2497 p->se.nr_wakeups_local = 0;
2498 p->se.nr_wakeups_remote = 0;
2499 p->se.nr_wakeups_affine = 0;
2500 p->se.nr_wakeups_affine_attempts = 0;
2501 p->se.nr_wakeups_passive = 0;
2502 p->se.nr_wakeups_idle = 0;
2503
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002504#endif
Nick Piggin476d1392005-06-25 14:57:29 -07002505
Peter Zijlstrafa717062008-01-25 21:08:27 +01002506 INIT_LIST_HEAD(&p->rt.run_list);
Ingo Molnardd41f592007-07-09 18:51:59 +02002507 p->se.on_rq = 0;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02002508 INIT_LIST_HEAD(&p->se.group_node);
Nick Piggin476d1392005-06-25 14:57:29 -07002509
Avi Kivitye107be32007-07-26 13:40:43 +02002510#ifdef CONFIG_PREEMPT_NOTIFIERS
2511 INIT_HLIST_HEAD(&p->preempt_notifiers);
2512#endif
2513
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514 /*
2515 * We mark the process as running here, but have not actually
2516 * inserted it onto the runqueue yet. This guarantees that
2517 * nobody will actually run it, and a signal or other external
2518 * event cannot wake it up and insert it on the runqueue either.
2519 */
2520 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02002521}
2522
2523/*
2524 * fork()/clone()-time setup:
2525 */
2526void sched_fork(struct task_struct *p, int clone_flags)
2527{
2528 int cpu = get_cpu();
2529
2530 __sched_fork(p);
2531
Ingo Molnarb29739f2006-06-27 02:54:51 -07002532 /*
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002533 * Revert to default priority/policy on fork if requested.
2534 */
2535 if (unlikely(p->sched_reset_on_fork)) {
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002536 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002537 p->policy = SCHED_NORMAL;
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002538 p->normal_prio = p->static_prio;
2539 }
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002540
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002541 if (PRIO_TO_NICE(p->static_prio) < 0) {
2542 p->static_prio = NICE_TO_PRIO(0);
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002543 p->normal_prio = p->static_prio;
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002544 set_load_weight(p);
2545 }
2546
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002547 /*
2548 * We don't need the reset flag anymore after the fork. It has
2549 * fulfilled its duty:
2550 */
2551 p->sched_reset_on_fork = 0;
2552 }
Lennart Poetteringca94c442009-06-15 17:17:47 +02002553
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002554 /*
2555 * Make sure we do not leak PI boosting priority to the child.
2556 */
2557 p->prio = current->normal_prio;
2558
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02002559 if (!rt_prio(p->prio))
2560 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07002561
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002562#ifdef CONFIG_SMP
2563 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
2564#endif
2565 set_task_cpu(p, cpu);
2566
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002567#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02002568 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002569 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570#endif
Chen, Kenneth Wd6077cb2006-02-14 13:53:10 -08002571#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
Nick Piggin4866cde2005-06-25 14:57:23 -07002572 p->oncpu = 0;
2573#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574#ifdef CONFIG_PREEMPT
Nick Piggin4866cde2005-06-25 14:57:23 -07002575 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08002576 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577#endif
Gregory Haskins917b6272008-12-29 09:39:53 -05002578 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2579
Nick Piggin476d1392005-06-25 14:57:29 -07002580 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581}
2582
2583/*
2584 * wake_up_new_task - wake up a newly created task for the first time.
2585 *
2586 * This function will do some initial scheduler statistics housekeeping
2587 * that must be done for every newly created context, then puts the task
2588 * on the runqueue and wakes it.
2589 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002590void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591{
2592 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002593 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594
2595 rq = task_rq_lock(p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 BUG_ON(p->state != TASK_RUNNING);
Ingo Molnara8e504d2007-08-09 11:16:47 +02002597 update_rq_clock(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598
Srivatsa Vaddagirib9dca1e2007-10-17 16:55:11 +02002599 if (!p->sched_class->task_new || !current->se.on_rq) {
Ingo Molnardd41f592007-07-09 18:51:59 +02002600 activate_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 /*
Ingo Molnardd41f592007-07-09 18:51:59 +02002603 * Let the scheduling class do new task startup
2604 * management (if any):
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 */
Ingo Molnaree0827d2007-08-09 11:16:49 +02002606 p->sched_class->task_new(rq, p);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02002607 inc_nr_running(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 }
Ingo Molnarc71dd422008-12-19 01:09:51 +01002609 trace_sched_wakeup_new(rq, p, 1);
Peter Zijlstraa7558e02009-09-14 20:02:34 +02002610 check_preempt_curr(rq, p, WF_FORK);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002611#ifdef CONFIG_SMP
2612 if (p->sched_class->task_wake_up)
2613 p->sched_class->task_wake_up(rq, p);
2614#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002615 task_rq_unlock(rq, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616}
2617
Avi Kivitye107be32007-07-26 13:40:43 +02002618#ifdef CONFIG_PREEMPT_NOTIFIERS
2619
2620/**
Luis Henriques80dd99b2009-03-16 19:58:09 +00002621 * preempt_notifier_register - tell me when current is being preempted & rescheduled
Randy Dunlap421cee22007-07-31 00:37:50 -07002622 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02002623 */
2624void preempt_notifier_register(struct preempt_notifier *notifier)
2625{
2626 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2627}
2628EXPORT_SYMBOL_GPL(preempt_notifier_register);
2629
2630/**
2631 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07002632 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02002633 *
2634 * This is safe to call from within a preemption notifier.
2635 */
2636void preempt_notifier_unregister(struct preempt_notifier *notifier)
2637{
2638 hlist_del(&notifier->link);
2639}
2640EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2641
2642static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2643{
2644 struct preempt_notifier *notifier;
2645 struct hlist_node *node;
2646
2647 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2648 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2649}
2650
2651static void
2652fire_sched_out_preempt_notifiers(struct task_struct *curr,
2653 struct task_struct *next)
2654{
2655 struct preempt_notifier *notifier;
2656 struct hlist_node *node;
2657
2658 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2659 notifier->ops->sched_out(notifier, next);
2660}
2661
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002662#else /* !CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002663
2664static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2665{
2666}
2667
2668static void
2669fire_sched_out_preempt_notifiers(struct task_struct *curr,
2670 struct task_struct *next)
2671{
2672}
2673
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002674#endif /* CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002675
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676/**
Nick Piggin4866cde2005-06-25 14:57:23 -07002677 * prepare_task_switch - prepare to switch tasks
2678 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07002679 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07002680 * @next: the task we are going to switch to.
2681 *
2682 * This is called with the rq lock held and interrupts off. It must
2683 * be paired with a subsequent finish_task_switch after the context
2684 * switch.
2685 *
2686 * prepare_task_switch sets up locking and calls architecture specific
2687 * hooks.
2688 */
Avi Kivitye107be32007-07-26 13:40:43 +02002689static inline void
2690prepare_task_switch(struct rq *rq, struct task_struct *prev,
2691 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07002692{
Avi Kivitye107be32007-07-26 13:40:43 +02002693 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002694 prepare_lock_switch(rq, next);
2695 prepare_arch_switch(next);
2696}
2697
2698/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04002700 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701 * @prev: the thread we just switched away from.
2702 *
Nick Piggin4866cde2005-06-25 14:57:23 -07002703 * finish_task_switch must be called after the context switch, paired
2704 * with a prepare_task_switch call before the context switch.
2705 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2706 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 *
2708 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002709 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 * with the lock held can cause deadlocks; see schedule() for
2711 * details.)
2712 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02002713static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 __releases(rq->lock)
2715{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002717 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718
2719 rq->prev_mm = NULL;
2720
2721 /*
2722 * A task struct has one reference for the use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002723 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002724 * schedule one last time. The schedule call will never return, and
2725 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002726 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 * still held, otherwise prev could be scheduled on another cpu, die
2728 * there before we look at prev->state, and then the reference would
2729 * be dropped twice.
2730 * Manfred Spraul <manfred@colorfullife.com>
2731 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002732 prev_state = prev->state;
Nick Piggin4866cde2005-06-25 14:57:23 -07002733 finish_arch_switch(prev);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002734 perf_event_task_sched_in(current, cpu_of(rq));
Nick Piggin4866cde2005-06-25 14:57:23 -07002735 finish_lock_switch(rq, prev);
Steven Rostedte8fa1362008-01-25 21:08:05 +01002736
Avi Kivitye107be32007-07-26 13:40:43 +02002737 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 if (mm)
2739 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002740 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08002741 /*
2742 * Remove function-return probe instances associated with this
2743 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02002744 */
bibo maoc6fd91f2006-03-26 01:38:20 -08002745 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08002747 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748}
2749
Gregory Haskins3f029d32009-07-29 11:08:47 -04002750#ifdef CONFIG_SMP
2751
2752/* assumes rq->lock is held */
2753static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2754{
2755 if (prev->sched_class->pre_schedule)
2756 prev->sched_class->pre_schedule(rq, prev);
2757}
2758
2759/* rq->lock is NOT held, but preemption is disabled */
2760static inline void post_schedule(struct rq *rq)
2761{
2762 if (rq->post_schedule) {
2763 unsigned long flags;
2764
2765 spin_lock_irqsave(&rq->lock, flags);
2766 if (rq->curr->sched_class->post_schedule)
2767 rq->curr->sched_class->post_schedule(rq);
2768 spin_unlock_irqrestore(&rq->lock, flags);
2769
2770 rq->post_schedule = 0;
2771 }
2772}
2773
2774#else
2775
2776static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2777{
2778}
2779
2780static inline void post_schedule(struct rq *rq)
2781{
2782}
2783
2784#endif
2785
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786/**
2787 * schedule_tail - first thing a freshly forked thread must call.
2788 * @prev: the thread we just switched away from.
2789 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002790asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 __releases(rq->lock)
2792{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002793 struct rq *rq = this_rq();
2794
Nick Piggin4866cde2005-06-25 14:57:23 -07002795 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002796
Gregory Haskins3f029d32009-07-29 11:08:47 -04002797 /*
2798 * FIXME: do we need to worry about rq being invalidated by the
2799 * task_switch?
2800 */
2801 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002802
Nick Piggin4866cde2005-06-25 14:57:23 -07002803#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2804 /* In this case, finish_task_switch does not reenable preemption */
2805 preempt_enable();
2806#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002808 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809}
2810
2811/*
2812 * context_switch - switch to the new MM and the new
2813 * thread's register state.
2814 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002815static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07002816context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07002817 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818{
Ingo Molnardd41f592007-07-09 18:51:59 +02002819 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820
Avi Kivitye107be32007-07-26 13:40:43 +02002821 prepare_task_switch(rq, prev, next);
Mathieu Desnoyers0a16b602008-07-18 12:16:17 -04002822 trace_sched_switch(rq, prev, next);
Ingo Molnardd41f592007-07-09 18:51:59 +02002823 mm = next->mm;
2824 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01002825 /*
2826 * For paravirt, this is coupled with an exit in switch_to to
2827 * combine the page table reload and the switch backend into
2828 * one hypercall.
2829 */
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -08002830 arch_start_context_switch(prev);
Zachary Amsden9226d122007-02-13 13:26:21 +01002831
Ingo Molnardd41f592007-07-09 18:51:59 +02002832 if (unlikely(!mm)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 next->active_mm = oldmm;
2834 atomic_inc(&oldmm->mm_count);
2835 enter_lazy_tlb(oldmm, next);
2836 } else
2837 switch_mm(oldmm, mm, next);
2838
Ingo Molnardd41f592007-07-09 18:51:59 +02002839 if (unlikely(!prev->mm)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 rq->prev_mm = oldmm;
2842 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002843 /*
2844 * Since the runqueue lock will be released by the next
2845 * task (which is an invalid locking op but in the case
2846 * of the scheduler it's an obvious special-case), so we
2847 * do an early lockdep release here:
2848 */
2849#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07002850 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002851#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
2853 /* Here we just switch the register state and the stack. */
2854 switch_to(prev, next, prev);
2855
Ingo Molnardd41f592007-07-09 18:51:59 +02002856 barrier();
2857 /*
2858 * this_rq must be evaluated again because prev may have moved
2859 * CPUs since it called schedule(), thus the 'rq' on its stack
2860 * frame will be invalid.
2861 */
2862 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863}
2864
2865/*
2866 * nr_running, nr_uninterruptible and nr_context_switches:
2867 *
2868 * externally visible scheduler statistics: current number of runnable
2869 * threads, current number of uninterruptible-sleeping threads, total
2870 * number of context switches performed since bootup.
2871 */
2872unsigned long nr_running(void)
2873{
2874 unsigned long i, sum = 0;
2875
2876 for_each_online_cpu(i)
2877 sum += cpu_rq(i)->nr_running;
2878
2879 return sum;
2880}
2881
2882unsigned long nr_uninterruptible(void)
2883{
2884 unsigned long i, sum = 0;
2885
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002886 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 sum += cpu_rq(i)->nr_uninterruptible;
2888
2889 /*
2890 * Since we read the counters lockless, it might be slightly
2891 * inaccurate. Do not allow it to go below zero though:
2892 */
2893 if (unlikely((long)sum < 0))
2894 sum = 0;
2895
2896 return sum;
2897}
2898
2899unsigned long long nr_context_switches(void)
2900{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07002901 int i;
2902 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002904 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 sum += cpu_rq(i)->nr_switches;
2906
2907 return sum;
2908}
2909
2910unsigned long nr_iowait(void)
2911{
2912 unsigned long i, sum = 0;
2913
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002914 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2916
2917 return sum;
2918}
2919
Arjan van de Ven69d25872009-09-21 17:04:08 -07002920unsigned long nr_iowait_cpu(void)
2921{
2922 struct rq *this = this_rq();
2923 return atomic_read(&this->nr_iowait);
2924}
2925
2926unsigned long this_cpu_load(void)
2927{
2928 struct rq *this = this_rq();
2929 return this->cpu_load[0];
2930}
2931
2932
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002933/* Variables and functions for calc_load */
2934static atomic_long_t calc_load_tasks;
2935static unsigned long calc_load_update;
2936unsigned long avenrun[3];
2937EXPORT_SYMBOL(avenrun);
2938
Thomas Gleixner2d024942009-05-02 20:08:52 +02002939/**
2940 * get_avenrun - get the load average array
2941 * @loads: pointer to dest load array
2942 * @offset: offset to add
2943 * @shift: shift count to shift the result left
2944 *
2945 * These values are estimates at best, so no need for locking.
2946 */
2947void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2948{
2949 loads[0] = (avenrun[0] + offset) << shift;
2950 loads[1] = (avenrun[1] + offset) << shift;
2951 loads[2] = (avenrun[2] + offset) << shift;
2952}
2953
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002954static unsigned long
2955calc_load(unsigned long load, unsigned long exp, unsigned long active)
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08002956{
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002957 load *= exp;
2958 load += active * (FIXED_1 - exp);
2959 return load >> FSHIFT;
2960}
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08002961
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002962/*
2963 * calc_load - update the avenrun load estimates 10 ticks after the
2964 * CPUs have updated calc_load_tasks.
2965 */
2966void calc_global_load(void)
2967{
2968 unsigned long upd = calc_load_update + 10;
2969 long active;
2970
2971 if (time_before(jiffies, upd))
2972 return;
2973
2974 active = atomic_long_read(&calc_load_tasks);
2975 active = active > 0 ? active * FIXED_1 : 0;
2976
2977 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2978 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2979 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
2980
2981 calc_load_update += LOAD_FREQ;
2982}
2983
2984/*
2985 * Either called from update_cpu_load() or from a cpu going idle
2986 */
2987static void calc_load_account_active(struct rq *this_rq)
2988{
2989 long nr_active, delta;
2990
2991 nr_active = this_rq->nr_running;
2992 nr_active += (long) this_rq->nr_uninterruptible;
2993
2994 if (nr_active != this_rq->calc_load_active) {
2995 delta = nr_active - this_rq->calc_load_active;
2996 this_rq->calc_load_active = nr_active;
2997 atomic_long_add(delta, &calc_load_tasks);
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08002998 }
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08002999}
3000
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003002 * Update rq->cpu_load[] statistics. This function is usually called every
3003 * scheduler tick (TICK_NSEC).
Ingo Molnar48f24c42006-07-03 00:25:40 -07003004 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003005static void update_cpu_load(struct rq *this_rq)
Ingo Molnar48f24c42006-07-03 00:25:40 -07003006{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02003007 unsigned long this_load = this_rq->load.weight;
Ingo Molnardd41f592007-07-09 18:51:59 +02003008 int i, scale;
3009
3010 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02003011
3012 /* Update our load: */
3013 for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
3014 unsigned long old_load, new_load;
3015
3016 /* scale is effectively 1 << i now, and >> i divides by scale */
3017
3018 old_load = this_rq->cpu_load[i];
3019 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02003020 /*
3021 * Round up the averaging division if load is increasing. This
3022 * prevents us from getting stuck on 9 if the load is 10, for
3023 * example.
3024 */
3025 if (new_load > old_load)
3026 new_load += scale-1;
Ingo Molnardd41f592007-07-09 18:51:59 +02003027 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
3028 }
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003029
3030 if (time_after_eq(jiffies, this_rq->calc_load_update)) {
3031 this_rq->calc_load_update += LOAD_FREQ;
3032 calc_load_account_active(this_rq);
3033 }
Ingo Molnar48f24c42006-07-03 00:25:40 -07003034}
3035
Ingo Molnardd41f592007-07-09 18:51:59 +02003036#ifdef CONFIG_SMP
3037
Ingo Molnar48f24c42006-07-03 00:25:40 -07003038/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 * double_rq_lock - safely lock two runqueues
3040 *
3041 * Note this does not disable interrupts like task_rq_lock,
3042 * you need to do so manually before calling.
3043 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07003044static void double_rq_lock(struct rq *rq1, struct rq *rq2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 __acquires(rq1->lock)
3046 __acquires(rq2->lock)
3047{
Kirill Korotaev054b9102006-12-10 02:20:11 -08003048 BUG_ON(!irqs_disabled());
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 if (rq1 == rq2) {
3050 spin_lock(&rq1->lock);
3051 __acquire(rq2->lock); /* Fake it out ;) */
3052 } else {
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07003053 if (rq1 < rq2) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 spin_lock(&rq1->lock);
Peter Zijlstra5e710e32008-07-30 13:26:57 +02003055 spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 } else {
3057 spin_lock(&rq2->lock);
Peter Zijlstra5e710e32008-07-30 13:26:57 +02003058 spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 }
3060 }
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02003061 update_rq_clock(rq1);
3062 update_rq_clock(rq2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063}
3064
3065/*
3066 * double_rq_unlock - safely unlock two runqueues
3067 *
3068 * Note this does not restore interrupts like task_rq_unlock,
3069 * you need to do so manually after calling.
3070 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07003071static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072 __releases(rq1->lock)
3073 __releases(rq2->lock)
3074{
3075 spin_unlock(&rq1->lock);
3076 if (rq1 != rq2)
3077 spin_unlock(&rq2->lock);
3078 else
3079 __release(rq2->lock);
3080}
3081
3082/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 * If dest_cpu is allowed for this process, migrate the task to it.
3084 * This is accomplished by forcing the cpu_allowed mask to only
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01003085 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 * the cpu_allowed mask is restored.
3087 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07003088static void sched_migrate_task(struct task_struct *p, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089{
Ingo Molnar70b97a72006-07-03 00:25:42 -07003090 struct migration_req req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003092 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093
3094 rq = task_rq_lock(p, &flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10303095 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
Max Krasnyanskye761b772008-07-15 04:43:49 -07003096 || unlikely(!cpu_active(dest_cpu)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097 goto out;
3098
3099 /* force the process onto the specified CPU */
3100 if (migrate_task(p, dest_cpu, &req)) {
3101 /* Need to wait for migration thread (might exit: take ref). */
3102 struct task_struct *mt = rq->migration_thread;
Ingo Molnar36c8b582006-07-03 00:25:41 -07003103
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104 get_task_struct(mt);
3105 task_rq_unlock(rq, &flags);
3106 wake_up_process(mt);
3107 put_task_struct(mt);
3108 wait_for_completion(&req.done);
Ingo Molnar36c8b582006-07-03 00:25:41 -07003109
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 return;
3111 }
3112out:
3113 task_rq_unlock(rq, &flags);
3114}
3115
3116/*
Nick Piggin476d1392005-06-25 14:57:29 -07003117 * sched_exec - execve() is a valuable balancing opportunity, because at
3118 * this point the task has the smallest effective memory and cache footprint.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119 */
3120void sched_exec(void)
3121{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122 int new_cpu, this_cpu = get_cpu();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02003123 new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 put_cpu();
Nick Piggin476d1392005-06-25 14:57:29 -07003125 if (new_cpu != this_cpu)
3126 sched_migrate_task(current, new_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127}
3128
3129/*
3130 * pull_task - move a task from a remote runqueue to the local runqueue.
3131 * Both runqueues must be locked.
3132 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003133static void pull_task(struct rq *src_rq, struct task_struct *p,
3134 struct rq *this_rq, int this_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135{
Ingo Molnar2e1cb742007-08-09 11:16:49 +02003136 deactivate_task(src_rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 set_task_cpu(p, this_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003138 activate_task(this_rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139 /*
3140 * Note that idle threads have a prio of MAX_PRIO, for this test
3141 * to be always true for them.
3142 */
Peter Zijlstra15afe092008-09-20 23:38:02 +02003143 check_preempt_curr(this_rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144}
3145
3146/*
3147 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3148 */
Arjan van de Ven858119e2006-01-14 13:20:43 -08003149static
Ingo Molnar70b97a72006-07-03 00:25:42 -07003150int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003151 struct sched_domain *sd, enum cpu_idle_type idle,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07003152 int *all_pinned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153{
Luis Henriques708dc512009-03-16 19:59:02 +00003154 int tsk_cache_hot = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 /*
3156 * We do not migrate tasks that are:
3157 * 1) running (obviously), or
3158 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3159 * 3) are cache-hot on their current CPU.
3160 */
Rusty Russell96f874e2008-11-25 02:35:14 +10303161 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
Ingo Molnarcc367732007-10-15 17:00:18 +02003162 schedstat_inc(p, se.nr_failed_migrations_affine);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 return 0;
Ingo Molnarcc367732007-10-15 17:00:18 +02003164 }
Nick Piggin81026792005-06-25 14:57:07 -07003165 *all_pinned = 0;
3166
Ingo Molnarcc367732007-10-15 17:00:18 +02003167 if (task_running(rq, p)) {
3168 schedstat_inc(p, se.nr_failed_migrations_running);
Nick Piggin81026792005-06-25 14:57:07 -07003169 return 0;
Ingo Molnarcc367732007-10-15 17:00:18 +02003170 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171
Ingo Molnarda84d962007-10-15 17:00:18 +02003172 /*
3173 * Aggressive migration if:
3174 * 1) task is cache cold, or
3175 * 2) too many balance attempts have failed.
3176 */
3177
Luis Henriques708dc512009-03-16 19:59:02 +00003178 tsk_cache_hot = task_hot(p, rq->clock, sd);
3179 if (!tsk_cache_hot ||
3180 sd->nr_balance_failed > sd->cache_nice_tries) {
Ingo Molnarda84d962007-10-15 17:00:18 +02003181#ifdef CONFIG_SCHEDSTATS
Luis Henriques708dc512009-03-16 19:59:02 +00003182 if (tsk_cache_hot) {
Ingo Molnarda84d962007-10-15 17:00:18 +02003183 schedstat_inc(sd, lb_hot_gained[idle]);
Ingo Molnarcc367732007-10-15 17:00:18 +02003184 schedstat_inc(p, se.nr_forced_migrations);
3185 }
Ingo Molnarda84d962007-10-15 17:00:18 +02003186#endif
3187 return 1;
3188 }
3189
Luis Henriques708dc512009-03-16 19:59:02 +00003190 if (tsk_cache_hot) {
Ingo Molnarcc367732007-10-15 17:00:18 +02003191 schedstat_inc(p, se.nr_failed_migrations_hot);
Ingo Molnarda84d962007-10-15 17:00:18 +02003192 return 0;
Ingo Molnarcc367732007-10-15 17:00:18 +02003193 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194 return 1;
3195}
3196
Peter Williamse1d14842007-10-24 18:23:51 +02003197static unsigned long
3198balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3199 unsigned long max_load_move, struct sched_domain *sd,
3200 enum cpu_idle_type idle, int *all_pinned,
3201 int *this_best_prio, struct rq_iterator *iterator)
Ingo Molnardd41f592007-07-09 18:51:59 +02003202{
Peter Zijlstra051c6762008-06-27 13:41:31 +02003203 int loops = 0, pulled = 0, pinned = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02003204 struct task_struct *p;
3205 long rem_load_move = max_load_move;
3206
Peter Williamse1d14842007-10-24 18:23:51 +02003207 if (max_load_move == 0)
Ingo Molnardd41f592007-07-09 18:51:59 +02003208 goto out;
3209
3210 pinned = 1;
3211
3212 /*
3213 * Start the load-balancing iterator:
3214 */
3215 p = iterator->start(iterator->arg);
3216next:
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +01003217 if (!p || loops++ > sysctl_sched_nr_migrate)
Ingo Molnardd41f592007-07-09 18:51:59 +02003218 goto out;
Peter Zijlstra051c6762008-06-27 13:41:31 +02003219
3220 if ((p->se.load.weight >> 1) > rem_load_move ||
Ingo Molnardd41f592007-07-09 18:51:59 +02003221 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
Ingo Molnardd41f592007-07-09 18:51:59 +02003222 p = iterator->next(iterator->arg);
3223 goto next;
3224 }
3225
3226 pull_task(busiest, p, this_rq, this_cpu);
3227 pulled++;
3228 rem_load_move -= p->se.load.weight;
3229
Gregory Haskins7e96fa52008-12-29 09:39:50 -05003230#ifdef CONFIG_PREEMPT
3231 /*
3232 * NEWIDLE balancing is a source of latency, so preemptible kernels
3233 * will stop after the first task is pulled to minimize the critical
3234 * section.
3235 */
3236 if (idle == CPU_NEWLY_IDLE)
3237 goto out;
3238#endif
3239
Ingo Molnardd41f592007-07-09 18:51:59 +02003240 /*
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +01003241 * We only want to steal up to the prescribed amount of weighted load.
Ingo Molnardd41f592007-07-09 18:51:59 +02003242 */
Peter Williamse1d14842007-10-24 18:23:51 +02003243 if (rem_load_move > 0) {
Peter Williamsa4ac01c2007-08-09 11:16:46 +02003244 if (p->prio < *this_best_prio)
3245 *this_best_prio = p->prio;
Ingo Molnardd41f592007-07-09 18:51:59 +02003246 p = iterator->next(iterator->arg);
3247 goto next;
3248 }
3249out:
3250 /*
Peter Williamse1d14842007-10-24 18:23:51 +02003251 * Right now, this is one of only two places pull_task() is called,
Ingo Molnardd41f592007-07-09 18:51:59 +02003252 * so we can safely collect pull_task() stats here rather than
3253 * inside pull_task().
3254 */
3255 schedstat_add(sd, lb_gained[idle], pulled);
3256
3257 if (all_pinned)
3258 *all_pinned = pinned;
Peter Williamse1d14842007-10-24 18:23:51 +02003259
3260 return max_load_move - rem_load_move;
Ingo Molnardd41f592007-07-09 18:51:59 +02003261}
Ingo Molnar48f24c42006-07-03 00:25:40 -07003262
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263/*
Peter Williams43010652007-08-09 11:16:46 +02003264 * move_tasks tries to move up to max_load_move weighted load from busiest to
3265 * this_rq, as part of a balancing operation within domain "sd".
3266 * Returns 1 if successful and 0 otherwise.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 *
3268 * Called with both runqueues locked.
3269 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07003270static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
Peter Williams43010652007-08-09 11:16:46 +02003271 unsigned long max_load_move,
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003272 struct sched_domain *sd, enum cpu_idle_type idle,
Peter Williams2dd73a42006-06-27 02:54:34 -07003273 int *all_pinned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02003275 const struct sched_class *class = sched_class_highest;
Peter Williams43010652007-08-09 11:16:46 +02003276 unsigned long total_load_moved = 0;
Peter Williamsa4ac01c2007-08-09 11:16:46 +02003277 int this_best_prio = this_rq->curr->prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278
Ingo Molnardd41f592007-07-09 18:51:59 +02003279 do {
Peter Williams43010652007-08-09 11:16:46 +02003280 total_load_moved +=
3281 class->load_balance(this_rq, this_cpu, busiest,
Peter Williamse1d14842007-10-24 18:23:51 +02003282 max_load_move - total_load_moved,
Peter Williamsa4ac01c2007-08-09 11:16:46 +02003283 sd, idle, all_pinned, &this_best_prio);
Ingo Molnardd41f592007-07-09 18:51:59 +02003284 class = class->next;
Gregory Haskinsc4acb2c2008-06-27 14:29:55 -06003285
Gregory Haskins7e96fa52008-12-29 09:39:50 -05003286#ifdef CONFIG_PREEMPT
3287 /*
3288 * NEWIDLE balancing is a source of latency, so preemptible
3289 * kernels will stop after the first task is pulled to minimize
3290 * the critical section.
3291 */
Gregory Haskinsc4acb2c2008-06-27 14:29:55 -06003292 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3293 break;
Gregory Haskins7e96fa52008-12-29 09:39:50 -05003294#endif
Peter Williams43010652007-08-09 11:16:46 +02003295 } while (class && max_load_move > total_load_moved);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296
Peter Williams43010652007-08-09 11:16:46 +02003297 return total_load_moved > 0;
3298}
3299
Peter Williamse1d14842007-10-24 18:23:51 +02003300static int
3301iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3302 struct sched_domain *sd, enum cpu_idle_type idle,
3303 struct rq_iterator *iterator)
3304{
3305 struct task_struct *p = iterator->start(iterator->arg);
3306 int pinned = 0;
3307
3308 while (p) {
3309 if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
3310 pull_task(busiest, p, this_rq, this_cpu);
3311 /*
3312 * Right now, this is only the second place pull_task()
3313 * is called, so we can safely collect pull_task()
3314 * stats here rather than inside pull_task().
3315 */
3316 schedstat_inc(sd, lb_gained[idle]);
3317
3318 return 1;
3319 }
3320 p = iterator->next(iterator->arg);
3321 }
3322
3323 return 0;
3324}
3325
Peter Williams43010652007-08-09 11:16:46 +02003326/*
3327 * move_one_task tries to move exactly one task from busiest to this_rq, as
3328 * part of active balancing operations within "domain".
3329 * Returns 1 if successful and 0 otherwise.
3330 *
3331 * Called with both runqueues locked.
3332 */
3333static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3334 struct sched_domain *sd, enum cpu_idle_type idle)
3335{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02003336 const struct sched_class *class;
Peter Williams43010652007-08-09 11:16:46 +02003337
Hiroshi Shimamotocde7e5ca2009-08-18 13:01:01 +09003338 for_each_class(class) {
Peter Williamse1d14842007-10-24 18:23:51 +02003339 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
Peter Williams43010652007-08-09 11:16:46 +02003340 return 1;
Hiroshi Shimamotocde7e5ca2009-08-18 13:01:01 +09003341 }
Peter Williams43010652007-08-09 11:16:46 +02003342
3343 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344}
Gautham R Shenoy67bb6c02009-03-25 14:43:35 +05303345/********** Helpers for find_busiest_group ************************/
Randy Dunlapd5ac5372009-03-28 21:52:47 -07003346/*
Gautham R Shenoy222d6562009-03-25 14:43:56 +05303347 * sd_lb_stats - Structure to store the statistics of a sched_domain
3348 * during load balancing.
3349 */
3350struct sd_lb_stats {
3351 struct sched_group *busiest; /* Busiest group in this sd */
3352 struct sched_group *this; /* Local group in this sd */
3353 unsigned long total_load; /* Total load of all groups in sd */
3354 unsigned long total_pwr; /* Total power of all groups in sd */
3355 unsigned long avg_load; /* Average load across all groups in sd */
3356
3357 /** Statistics of this group */
3358 unsigned long this_load;
3359 unsigned long this_load_per_task;
3360 unsigned long this_nr_running;
3361
3362 /* Statistics of the busiest group */
3363 unsigned long max_load;
3364 unsigned long busiest_load_per_task;
3365 unsigned long busiest_nr_running;
3366
3367 int group_imb; /* Is there imbalance in this sd */
3368#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3369 int power_savings_balance; /* Is powersave balance needed for this sd */
3370 struct sched_group *group_min; /* Least loaded group in sd */
3371 struct sched_group *group_leader; /* Group which relieves group_min */
3372 unsigned long min_load_per_task; /* load_per_task in group_min */
3373 unsigned long leader_nr_running; /* Nr running of group_leader */
3374 unsigned long min_nr_running; /* Nr running of group_min */
3375#endif
3376};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377
3378/*
Gautham R Shenoy381be782009-03-25 14:43:46 +05303379 * sg_lb_stats - stats of a sched_group required for load_balancing
3380 */
3381struct sg_lb_stats {
3382 unsigned long avg_load; /*Avg load across the CPUs of the group */
3383 unsigned long group_load; /* Total load over the CPUs of the group */
3384 unsigned long sum_nr_running; /* Nr tasks running in the group */
3385 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3386 unsigned long group_capacity;
3387 int group_imb; /* Is there an imbalance in the group ? */
3388};
3389
3390/**
Gautham R Shenoy67bb6c02009-03-25 14:43:35 +05303391 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
3392 * @group: The group whose first cpu is to be returned.
3393 */
3394static inline unsigned int group_first_cpu(struct sched_group *group)
3395{
3396 return cpumask_first(sched_group_cpus(group));
3397}
3398
3399/**
3400 * get_sd_load_idx - Obtain the load index for a given sched domain.
3401 * @sd: The sched_domain whose load_idx is to be obtained.
3402 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
3403 */
3404static inline int get_sd_load_idx(struct sched_domain *sd,
3405 enum cpu_idle_type idle)
3406{
3407 int load_idx;
3408
3409 switch (idle) {
3410 case CPU_NOT_IDLE:
3411 load_idx = sd->busy_idx;
3412 break;
3413
3414 case CPU_NEWLY_IDLE:
3415 load_idx = sd->newidle_idx;
3416 break;
3417 default:
3418 load_idx = sd->idle_idx;
3419 break;
3420 }
3421
3422 return load_idx;
3423}
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303424
3425
Gautham R Shenoyc071df12009-03-25 14:44:22 +05303426#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3427/**
3428 * init_sd_power_savings_stats - Initialize power savings statistics for
3429 * the given sched_domain, during load balancing.
3430 *
3431 * @sd: Sched domain whose power-savings statistics are to be initialized.
3432 * @sds: Variable containing the statistics for sd.
3433 * @idle: Idle status of the CPU at which we're performing load-balancing.
3434 */
3435static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3436 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3437{
3438 /*
3439 * Busy processors will not participate in power savings
3440 * balance.
3441 */
3442 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3443 sds->power_savings_balance = 0;
3444 else {
3445 sds->power_savings_balance = 1;
3446 sds->min_nr_running = ULONG_MAX;
3447 sds->leader_nr_running = 0;
3448 }
3449}
3450
3451/**
3452 * update_sd_power_savings_stats - Update the power saving stats for a
3453 * sched_domain while performing load balancing.
3454 *
3455 * @group: sched_group belonging to the sched_domain under consideration.
3456 * @sds: Variable containing the statistics of the sched_domain
3457 * @local_group: Does group contain the CPU for which we're performing
3458 * load balancing ?
3459 * @sgs: Variable containing the statistics of the group.
3460 */
3461static inline void update_sd_power_savings_stats(struct sched_group *group,
3462 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3463{
3464
3465 if (!sds->power_savings_balance)
3466 return;
3467
3468 /*
3469 * If the local group is idle or completely loaded
3470 * no need to do power savings balance at this domain
3471 */
3472 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3473 !sds->this_nr_running))
3474 sds->power_savings_balance = 0;
3475
3476 /*
3477 * If a group is already running at full capacity or idle,
3478 * don't include that group in power savings calculations
3479 */
3480 if (!sds->power_savings_balance ||
3481 sgs->sum_nr_running >= sgs->group_capacity ||
3482 !sgs->sum_nr_running)
3483 return;
3484
3485 /*
3486 * Calculate the group which has the least non-idle load.
3487 * This is the group from where we need to pick up the load
3488 * for saving power
3489 */
3490 if ((sgs->sum_nr_running < sds->min_nr_running) ||
3491 (sgs->sum_nr_running == sds->min_nr_running &&
3492 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3493 sds->group_min = group;
3494 sds->min_nr_running = sgs->sum_nr_running;
3495 sds->min_load_per_task = sgs->sum_weighted_load /
3496 sgs->sum_nr_running;
3497 }
3498
3499 /*
3500 * Calculate the group which is almost near its
3501 * capacity but still has some space to pick up some load
3502 * from other group and save more power
3503 */
Gautham R Shenoyd899a782009-09-02 16:59:10 +05303504 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
Gautham R Shenoyc071df12009-03-25 14:44:22 +05303505 return;
3506
3507 if (sgs->sum_nr_running > sds->leader_nr_running ||
3508 (sgs->sum_nr_running == sds->leader_nr_running &&
3509 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3510 sds->group_leader = group;
3511 sds->leader_nr_running = sgs->sum_nr_running;
3512 }
3513}
3514
3515/**
Randy Dunlapd5ac5372009-03-28 21:52:47 -07003516 * check_power_save_busiest_group - see if there is potential for some power-savings balance
Gautham R Shenoyc071df12009-03-25 14:44:22 +05303517 * @sds: Variable containing the statistics of the sched_domain
3518 * under consideration.
3519 * @this_cpu: Cpu at which we're currently performing load-balancing.
3520 * @imbalance: Variable to store the imbalance.
3521 *
Randy Dunlapd5ac5372009-03-28 21:52:47 -07003522 * Description:
3523 * Check if we have potential to perform some power-savings balance.
3524 * If yes, set the busiest group to be the least loaded group in the
3525 * sched_domain, so that it's CPUs can be put to idle.
3526 *
Gautham R Shenoyc071df12009-03-25 14:44:22 +05303527 * Returns 1 if there is potential to perform power-savings balance.
3528 * Else returns 0.
3529 */
3530static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3531 int this_cpu, unsigned long *imbalance)
3532{
3533 if (!sds->power_savings_balance)
3534 return 0;
3535
3536 if (sds->this != sds->group_leader ||
3537 sds->group_leader == sds->group_min)
3538 return 0;
3539
3540 *imbalance = sds->min_load_per_task;
3541 sds->busiest = sds->group_min;
3542
Gautham R Shenoyc071df12009-03-25 14:44:22 +05303543 return 1;
3544
3545}
3546#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3547static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3548 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3549{
3550 return;
3551}
3552
3553static inline void update_sd_power_savings_stats(struct sched_group *group,
3554 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3555{
3556 return;
3557}
3558
3559static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3560 int this_cpu, unsigned long *imbalance)
3561{
3562 return 0;
3563}
3564#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3565
Peter Zijlstrad6a59aa2009-09-02 13:28:02 +02003566
3567unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
3568{
3569 return SCHED_LOAD_SCALE;
3570}
3571
3572unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
3573{
3574 return default_scale_freq_power(sd, cpu);
3575}
3576
3577unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstraab292302009-09-01 10:34:36 +02003578{
3579 unsigned long weight = cpumask_weight(sched_domain_span(sd));
3580 unsigned long smt_gain = sd->smt_gain;
3581
3582 smt_gain /= weight;
3583
3584 return smt_gain;
3585}
3586
Peter Zijlstrad6a59aa2009-09-02 13:28:02 +02003587unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
3588{
3589 return default_scale_smt_power(sd, cpu);
3590}
3591
Peter Zijlstrae9e92502009-09-01 10:34:37 +02003592unsigned long scale_rt_power(int cpu)
3593{
3594 struct rq *rq = cpu_rq(cpu);
3595 u64 total, available;
3596
3597 sched_avg_update(rq);
3598
3599 total = sched_avg_period() + (rq->clock - rq->age_stamp);
3600 available = total - rq->rt_avg;
3601
3602 if (unlikely((s64)total < SCHED_LOAD_SCALE))
3603 total = SCHED_LOAD_SCALE;
3604
3605 total >>= SCHED_LOAD_SHIFT;
3606
3607 return div_u64(available, total);
3608}
3609
Peter Zijlstraab292302009-09-01 10:34:36 +02003610static void update_cpu_power(struct sched_domain *sd, int cpu)
3611{
3612 unsigned long weight = cpumask_weight(sched_domain_span(sd));
3613 unsigned long power = SCHED_LOAD_SCALE;
3614 struct sched_group *sdg = sd->groups;
Peter Zijlstraab292302009-09-01 10:34:36 +02003615
Peter Zijlstra8e6598a2009-09-03 13:20:03 +02003616 if (sched_feat(ARCH_POWER))
3617 power *= arch_scale_freq_power(sd, cpu);
3618 else
3619 power *= default_scale_freq_power(sd, cpu);
3620
Peter Zijlstrad6a59aa2009-09-02 13:28:02 +02003621 power >>= SCHED_LOAD_SHIFT;
Peter Zijlstraab292302009-09-01 10:34:36 +02003622
3623 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
Peter Zijlstra8e6598a2009-09-03 13:20:03 +02003624 if (sched_feat(ARCH_POWER))
3625 power *= arch_scale_smt_power(sd, cpu);
3626 else
3627 power *= default_scale_smt_power(sd, cpu);
3628
Peter Zijlstraab292302009-09-01 10:34:36 +02003629 power >>= SCHED_LOAD_SHIFT;
3630 }
3631
Peter Zijlstrae9e92502009-09-01 10:34:37 +02003632 power *= scale_rt_power(cpu);
3633 power >>= SCHED_LOAD_SHIFT;
3634
3635 if (!power)
3636 power = 1;
Peter Zijlstraab292302009-09-01 10:34:36 +02003637
Peter Zijlstra18a38852009-09-01 10:34:39 +02003638 sdg->cpu_power = power;
Peter Zijlstraab292302009-09-01 10:34:36 +02003639}
3640
3641static void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003642{
3643 struct sched_domain *child = sd->child;
3644 struct sched_group *group, *sdg = sd->groups;
Ingo Molnard7ea17a2009-09-04 11:49:25 +02003645 unsigned long power;
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003646
3647 if (!child) {
Peter Zijlstraab292302009-09-01 10:34:36 +02003648 update_cpu_power(sd, cpu);
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003649 return;
3650 }
3651
Ingo Molnard7ea17a2009-09-04 11:49:25 +02003652 power = 0;
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003653
3654 group = child->groups;
3655 do {
Ingo Molnard7ea17a2009-09-04 11:49:25 +02003656 power += group->cpu_power;
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003657 group = group->next;
3658 } while (group != child->groups);
Ingo Molnard7ea17a2009-09-04 11:49:25 +02003659
3660 sdg->cpu_power = power;
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003661}
Gautham R Shenoyc071df12009-03-25 14:44:22 +05303662
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303663/**
3664 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
Randy Dunlape17b38b2009-10-11 19:12:00 -07003665 * @sd: The sched_domain whose statistics are to be updated.
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303666 * @group: sched_group whose statistics are to be updated.
3667 * @this_cpu: Cpu for which load balance is currently performed.
3668 * @idle: Idle status of this_cpu
3669 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3670 * @sd_idle: Idle status of the sched_domain containing group.
3671 * @local_group: Does group contain this_cpu.
3672 * @cpus: Set of cpus considered for load balancing.
3673 * @balance: Should we balance.
3674 * @sgs: variable to hold the statistics for this group.
3675 */
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003676static inline void update_sg_lb_stats(struct sched_domain *sd,
3677 struct sched_group *group, int this_cpu,
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303678 enum cpu_idle_type idle, int load_idx, int *sd_idle,
3679 int local_group, const struct cpumask *cpus,
3680 int *balance, struct sg_lb_stats *sgs)
3681{
3682 unsigned long load, max_cpu_load, min_cpu_load;
3683 int i;
3684 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3685 unsigned long sum_avg_load_per_task;
3686 unsigned long avg_load_per_task;
3687
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003688 if (local_group) {
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303689 balance_cpu = group_first_cpu(group);
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003690 if (balance_cpu == this_cpu)
Peter Zijlstraab292302009-09-01 10:34:36 +02003691 update_group_power(sd, this_cpu);
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003692 }
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303693
3694 /* Tally up the load of all CPUs in the group */
3695 sum_avg_load_per_task = avg_load_per_task = 0;
3696 max_cpu_load = 0;
3697 min_cpu_load = ~0UL;
3698
3699 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3700 struct rq *rq = cpu_rq(i);
3701
3702 if (*sd_idle && rq->nr_running)
3703 *sd_idle = 0;
3704
3705 /* Bias balancing toward cpus of our domain */
3706 if (local_group) {
3707 if (idle_cpu(i) && !first_idle_cpu) {
3708 first_idle_cpu = 1;
3709 balance_cpu = i;
3710 }
3711
3712 load = target_load(i, load_idx);
3713 } else {
3714 load = source_load(i, load_idx);
3715 if (load > max_cpu_load)
3716 max_cpu_load = load;
3717 if (min_cpu_load > load)
3718 min_cpu_load = load;
3719 }
3720
3721 sgs->group_load += load;
3722 sgs->sum_nr_running += rq->nr_running;
3723 sgs->sum_weighted_load += weighted_cpuload(i);
3724
3725 sum_avg_load_per_task += cpu_avg_load_per_task(i);
3726 }
3727
3728 /*
3729 * First idle cpu or the first cpu(busiest) in this sched group
3730 * is eligible for doing load balancing at this and above
3731 * domains. In the newly idle case, we will allow all the cpu's
3732 * to do the newly idle load balance.
3733 */
3734 if (idle != CPU_NEWLY_IDLE && local_group &&
3735 balance_cpu != this_cpu && balance) {
3736 *balance = 0;
3737 return;
3738 }
3739
3740 /* Adjust by relative CPU power of the group */
Peter Zijlstra18a38852009-09-01 10:34:39 +02003741 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303742
3743
3744 /*
3745 * Consider the group unbalanced when the imbalance is larger
3746 * than the average weight of two tasks.
3747 *
3748 * APZ: with cgroup the avg task weight can vary wildly and
3749 * might not be a suitable number - should we keep a
3750 * normalized nr_running number somewhere that negates
3751 * the hierarchy?
3752 */
Peter Zijlstra18a38852009-09-01 10:34:39 +02003753 avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
3754 group->cpu_power;
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303755
3756 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3757 sgs->group_imb = 1;
3758
Peter Zijlstrabdb94aa2009-09-01 10:34:38 +02003759 sgs->group_capacity =
Peter Zijlstra18a38852009-09-01 10:34:39 +02003760 DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303761}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303763/**
3764 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
3765 * @sd: sched_domain whose statistics are to be updated.
3766 * @this_cpu: Cpu for which load balance is currently performed.
3767 * @idle: Idle status of this_cpu
3768 * @sd_idle: Idle status of the sched_domain containing group.
3769 * @cpus: Set of cpus considered for load balancing.
3770 * @balance: Should we balance.
3771 * @sds: variable to hold the statistics for this sched_domain.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003772 */
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303773static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3774 enum cpu_idle_type idle, int *sd_idle,
3775 const struct cpumask *cpus, int *balance,
3776 struct sd_lb_stats *sds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777{
Peter Zijlstrab5d978e2009-09-01 10:34:33 +02003778 struct sched_domain *child = sd->child;
Gautham R Shenoy222d6562009-03-25 14:43:56 +05303779 struct sched_group *group = sd->groups;
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303780 struct sg_lb_stats sgs;
Peter Zijlstrab5d978e2009-09-01 10:34:33 +02003781 int load_idx, prefer_sibling = 0;
3782
3783 if (child && child->flags & SD_PREFER_SIBLING)
3784 prefer_sibling = 1;
Gautham R Shenoy222d6562009-03-25 14:43:56 +05303785
Gautham R Shenoyc071df12009-03-25 14:44:22 +05303786 init_sd_power_savings_stats(sd, sds, idle);
Gautham R Shenoy67bb6c02009-03-25 14:43:35 +05303787 load_idx = get_sd_load_idx(sd, idle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788
3789 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790 int local_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791
Rusty Russell758b2cd2008-11-25 02:35:04 +10303792 local_group = cpumask_test_cpu(this_cpu,
3793 sched_group_cpus(group));
Gautham R Shenoy381be782009-03-25 14:43:46 +05303794 memset(&sgs, 0, sizeof(sgs));
Peter Zijlstracc9fba72009-09-01 10:34:34 +02003795 update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
Gautham R Shenoy1f8c5532009-03-25 14:43:51 +05303796 local_group, cpus, balance, &sgs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303798 if (local_group && balance && !(*balance))
3799 return;
Siddha, Suresh B783609c2006-12-10 02:20:33 -08003800
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303801 sds->total_load += sgs.group_load;
Peter Zijlstra18a38852009-09-01 10:34:39 +02003802 sds->total_pwr += group->cpu_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803
Peter Zijlstrab5d978e2009-09-01 10:34:33 +02003804 /*
3805 * In case the child domain prefers tasks go to siblings
3806 * first, lower the group capacity to one so that we'll try
3807 * and move all the excess tasks away.
3808 */
3809 if (prefer_sibling)
Peter Zijlstrabdb94aa2009-09-01 10:34:38 +02003810 sgs.group_capacity = min(sgs.group_capacity, 1UL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812 if (local_group) {
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303813 sds->this_load = sgs.avg_load;
3814 sds->this = group;
3815 sds->this_nr_running = sgs.sum_nr_running;
3816 sds->this_load_per_task = sgs.sum_weighted_load;
3817 } else if (sgs.avg_load > sds->max_load &&
Gautham R Shenoy381be782009-03-25 14:43:46 +05303818 (sgs.sum_nr_running > sgs.group_capacity ||
3819 sgs.group_imb)) {
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303820 sds->max_load = sgs.avg_load;
3821 sds->busiest = group;
3822 sds->busiest_nr_running = sgs.sum_nr_running;
3823 sds->busiest_load_per_task = sgs.sum_weighted_load;
3824 sds->group_imb = sgs.group_imb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 }
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07003826
Gautham R Shenoyc071df12009-03-25 14:44:22 +05303827 update_sd_power_savings_stats(group, sds, local_group, &sgs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 group = group->next;
3829 } while (group != sd->groups);
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303830}
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303831
3832/**
3833 * fix_small_imbalance - Calculate the minor imbalance that exists
Gautham R Shenoydbc523a2009-03-25 14:44:12 +05303834 * amongst the groups of a sched_domain, during
3835 * load balancing.
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303836 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3837 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3838 * @imbalance: Variable to store the imbalance.
3839 */
3840static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3841 int this_cpu, unsigned long *imbalance)
3842{
3843 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3844 unsigned int imbn = 2;
3845
3846 if (sds->this_nr_running) {
3847 sds->this_load_per_task /= sds->this_nr_running;
3848 if (sds->busiest_load_per_task >
3849 sds->this_load_per_task)
3850 imbn = 1;
3851 } else
3852 sds->this_load_per_task =
3853 cpu_avg_load_per_task(this_cpu);
3854
3855 if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3856 sds->busiest_load_per_task * imbn) {
3857 *imbalance = sds->busiest_load_per_task;
3858 return;
3859 }
3860
3861 /*
3862 * OK, we don't have enough imbalance to justify moving tasks,
3863 * however we may be able to increase total CPU power used by
3864 * moving them.
3865 */
3866
Peter Zijlstra18a38852009-09-01 10:34:39 +02003867 pwr_now += sds->busiest->cpu_power *
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303868 min(sds->busiest_load_per_task, sds->max_load);
Peter Zijlstra18a38852009-09-01 10:34:39 +02003869 pwr_now += sds->this->cpu_power *
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303870 min(sds->this_load_per_task, sds->this_load);
3871 pwr_now /= SCHED_LOAD_SCALE;
3872
3873 /* Amount of load we'd subtract */
Peter Zijlstra18a38852009-09-01 10:34:39 +02003874 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
3875 sds->busiest->cpu_power;
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303876 if (sds->max_load > tmp)
Peter Zijlstra18a38852009-09-01 10:34:39 +02003877 pwr_move += sds->busiest->cpu_power *
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303878 min(sds->busiest_load_per_task, sds->max_load - tmp);
3879
3880 /* Amount of load we'd add */
Peter Zijlstra18a38852009-09-01 10:34:39 +02003881 if (sds->max_load * sds->busiest->cpu_power <
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303882 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
Peter Zijlstra18a38852009-09-01 10:34:39 +02003883 tmp = (sds->max_load * sds->busiest->cpu_power) /
3884 sds->this->cpu_power;
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303885 else
Peter Zijlstra18a38852009-09-01 10:34:39 +02003886 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
3887 sds->this->cpu_power;
3888 pwr_move += sds->this->cpu_power *
Gautham R Shenoy2e6f44a2009-03-25 14:44:06 +05303889 min(sds->this_load_per_task, sds->this_load + tmp);
3890 pwr_move /= SCHED_LOAD_SCALE;
3891
3892 /* Move if we gain throughput */
3893 if (pwr_move > pwr_now)
3894 *imbalance = sds->busiest_load_per_task;
3895}
Gautham R Shenoydbc523a2009-03-25 14:44:12 +05303896
3897/**
3898 * calculate_imbalance - Calculate the amount of imbalance present within the
3899 * groups of a given sched_domain during load balance.
3900 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3901 * @this_cpu: Cpu for which currently load balance is being performed.
3902 * @imbalance: The variable to store the imbalance.
3903 */
3904static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3905 unsigned long *imbalance)
3906{
3907 unsigned long max_pull;
3908 /*
3909 * In the presence of smp nice balancing, certain scenarios can have
3910 * max load less than avg load(as we skip the groups at or below
3911 * its cpu_power, while calculating max_load..)
3912 */
3913 if (sds->max_load < sds->avg_load) {
3914 *imbalance = 0;
3915 return fix_small_imbalance(sds, this_cpu, imbalance);
3916 }
3917
3918 /* Don't want to pull so many tasks that a group would go idle */
3919 max_pull = min(sds->max_load - sds->avg_load,
3920 sds->max_load - sds->busiest_load_per_task);
3921
3922 /* How much load to actually move to equalise the imbalance */
Peter Zijlstra18a38852009-09-01 10:34:39 +02003923 *imbalance = min(max_pull * sds->busiest->cpu_power,
3924 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
Gautham R Shenoydbc523a2009-03-25 14:44:12 +05303925 / SCHED_LOAD_SCALE;
3926
3927 /*
3928 * if *imbalance is less than the average load per runnable task
3929 * there is no gaurantee that any tasks will be moved so we'll have
3930 * a think about bumping its value to force at least one task to be
3931 * moved
3932 */
3933 if (*imbalance < sds->busiest_load_per_task)
3934 return fix_small_imbalance(sds, this_cpu, imbalance);
3935
3936}
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303937/******* find_busiest_group() helpers end here *********************/
3938
Gautham R Shenoyb7bb4c92009-03-25 14:44:27 +05303939/**
3940 * find_busiest_group - Returns the busiest group within the sched_domain
3941 * if there is an imbalance. If there isn't an imbalance, and
3942 * the user has opted for power-savings, it returns a group whose
3943 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3944 * such a group exists.
3945 *
3946 * Also calculates the amount of weighted load which should be moved
3947 * to restore balance.
3948 *
3949 * @sd: The sched_domain whose busiest group is to be returned.
3950 * @this_cpu: The cpu for which load balancing is currently being performed.
3951 * @imbalance: Variable which stores amount of weighted load which should
3952 * be moved to restore balance/put a group to idle.
3953 * @idle: The idle status of this_cpu.
3954 * @sd_idle: The idleness of sd
3955 * @cpus: The set of CPUs under consideration for load-balancing.
3956 * @balance: Pointer to a variable indicating if this_cpu
3957 * is the appropriate cpu to perform load balancing at this_level.
3958 *
3959 * Returns: - the busiest group if imbalance exists.
3960 * - If no imbalance and user has opted for power-savings balance,
3961 * return the least loaded group whose CPUs can be
3962 * put to idle by rebalancing its tasks onto our group.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963 */
3964static struct sched_group *
3965find_busiest_group(struct sched_domain *sd, int this_cpu,
3966 unsigned long *imbalance, enum cpu_idle_type idle,
3967 int *sd_idle, const struct cpumask *cpus, int *balance)
3968{
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303969 struct sd_lb_stats sds;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303971 memset(&sds, 0, sizeof(sds));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303973 /*
3974 * Compute the various statistics relavent for load balancing at
3975 * this level.
3976 */
3977 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
3978 balance, &sds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979
Gautham R Shenoyb7bb4c92009-03-25 14:44:27 +05303980 /* Cases where imbalance does not exist from POV of this_cpu */
3981 /* 1) this_cpu is not the appropriate cpu to perform load balancing
3982 * at this level.
3983 * 2) There is no busy sibling group to pull from.
3984 * 3) This group is the busiest group.
3985 * 4) This group is more busy than the avg busieness at this
3986 * sched_domain.
3987 * 5) The imbalance is within the specified limit.
3988 * 6) Any rebalance would lead to ping-pong
3989 */
Gautham R Shenoy37abe192009-03-25 14:44:01 +05303990 if (balance && !(*balance))
3991 goto ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992
Gautham R Shenoyb7bb4c92009-03-25 14:44:27 +05303993 if (!sds.busiest || sds.busiest_nr_running == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 goto out_balanced;
3995
Gautham R Shenoyb7bb4c92009-03-25 14:44:27 +05303996 if (sds.this_load >= sds.max_load)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 goto out_balanced;
3998
Gautham R Shenoy222d6562009-03-25 14:43:56 +05303999 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000
Gautham R Shenoyb7bb4c92009-03-25 14:44:27 +05304001 if (sds.this_load >= sds.avg_load)
4002 goto out_balanced;
4003
4004 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005 goto out_balanced;
4006
Gautham R Shenoy222d6562009-03-25 14:43:56 +05304007 sds.busiest_load_per_task /= sds.busiest_nr_running;
4008 if (sds.group_imb)
4009 sds.busiest_load_per_task =
4010 min(sds.busiest_load_per_task, sds.avg_load);
Ken Chen908a7c12007-10-17 16:55:11 +02004011
Linus Torvalds1da177e2005-04-16 15:20:36 -07004012 /*
4013 * We're trying to get all the cpus to the average_load, so we don't
4014 * want to push ourselves above the average load, nor do we wish to
4015 * reduce the max loaded cpu below the average load, as either of these
4016 * actions would just result in more rebalancing later, and ping-pong
4017 * tasks around. Thus we look for the minimum possible imbalance.
4018 * Negative imbalances (*we* are more loaded than anyone else) will
4019 * be counted as no imbalance for these purposes -- we can't fix that
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004020 * by pulling tasks to us. Be careful of negative numbers as they'll
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021 * appear as very large values with unsigned longs.
4022 */
Gautham R Shenoy222d6562009-03-25 14:43:56 +05304023 if (sds.max_load <= sds.busiest_load_per_task)
Peter Williams2dd73a42006-06-27 02:54:34 -07004024 goto out_balanced;
4025
Gautham R Shenoydbc523a2009-03-25 14:44:12 +05304026 /* Looks like there is an imbalance. Compute it */
4027 calculate_imbalance(&sds, this_cpu, imbalance);
Gautham R Shenoy222d6562009-03-25 14:43:56 +05304028 return sds.busiest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029
4030out_balanced:
Gautham R Shenoyc071df12009-03-25 14:44:22 +05304031 /*
4032 * There is no obvious imbalance. But check if we can do some balancing
4033 * to save power.
4034 */
4035 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
4036 return sds.busiest;
Siddha, Suresh B783609c2006-12-10 02:20:33 -08004037ret:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 *imbalance = 0;
4039 return NULL;
4040}
4041
4042/*
4043 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4044 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07004045static struct rq *
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004046find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
Rusty Russell96f874e2008-11-25 02:35:14 +10304047 unsigned long imbalance, const struct cpumask *cpus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048{
Ingo Molnar70b97a72006-07-03 00:25:42 -07004049 struct rq *busiest = NULL, *rq;
Peter Williams2dd73a42006-06-27 02:54:34 -07004050 unsigned long max_load = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051 int i;
4052
Rusty Russell758b2cd2008-11-25 02:35:04 +10304053 for_each_cpu(i, sched_group_cpus(group)) {
Peter Zijlstrabdb94aa2009-09-01 10:34:38 +02004054 unsigned long power = power_of(i);
4055 unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
Ingo Molnardd41f592007-07-09 18:51:59 +02004056 unsigned long wl;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004057
Rusty Russell96f874e2008-11-25 02:35:14 +10304058 if (!cpumask_test_cpu(i, cpus))
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004059 continue;
4060
Ingo Molnar48f24c42006-07-03 00:25:40 -07004061 rq = cpu_rq(i);
Peter Zijlstrabdb94aa2009-09-01 10:34:38 +02004062 wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
4063 wl /= power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064
Peter Zijlstrabdb94aa2009-09-01 10:34:38 +02004065 if (capacity && rq->nr_running == 1 && wl > imbalance)
Peter Williams2dd73a42006-06-27 02:54:34 -07004066 continue;
4067
Ingo Molnardd41f592007-07-09 18:51:59 +02004068 if (wl > max_load) {
4069 max_load = wl;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004070 busiest = rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 }
4072 }
4073
4074 return busiest;
4075}
4076
4077/*
Nick Piggin77391d72005-06-25 14:57:30 -07004078 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
4079 * so long as it is large enough.
4080 */
4081#define MAX_PINNED_INTERVAL 512
4082
Rusty Russelldf7c8e82009-03-19 15:22:20 +10304083/* Working cpumask for load_balance and load_balance_newidle. */
4084static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
4085
Nick Piggin77391d72005-06-25 14:57:30 -07004086/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087 * Check this_cpu to ensure it is balanced within domain. Attempt to move
4088 * tasks if there is an imbalance.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07004090static int load_balance(int this_cpu, struct rq *this_rq,
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004091 struct sched_domain *sd, enum cpu_idle_type idle,
Rusty Russelldf7c8e82009-03-19 15:22:20 +10304092 int *balance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093{
Peter Williams43010652007-08-09 11:16:46 +02004094 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095 struct sched_group *group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 unsigned long imbalance;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004097 struct rq *busiest;
Christoph Lameterfe2eea32006-12-10 02:20:21 -08004098 unsigned long flags;
Rusty Russelldf7c8e82009-03-19 15:22:20 +10304099 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
Nick Piggin5969fe02005-09-10 00:26:19 -07004100
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01004101 cpumask_copy(cpus, cpu_online_mask);
Mike Travis7c16ec52008-04-04 18:11:11 -07004102
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004103 /*
4104 * When power savings policy is enabled for the parent domain, idle
4105 * sibling can pick up load irrespective of busy siblings. In this case,
Ingo Molnardd41f592007-07-09 18:51:59 +02004106 * let the state of idle sibling percolate up as CPU_IDLE, instead of
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004107 * portraying it as CPU_NOT_IDLE.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004108 */
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004109 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004110 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07004111 sd_idle = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112
Ingo Molnar2d723762007-10-15 17:00:12 +02004113 schedstat_inc(sd, lb_count[idle]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004115redo:
Peter Zijlstrac8cba852008-06-27 13:41:23 +02004116 update_shares(sd);
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004117 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
Mike Travis7c16ec52008-04-04 18:11:11 -07004118 cpus, balance);
Siddha, Suresh B783609c2006-12-10 02:20:33 -08004119
Chen, Kenneth W06066712006-12-10 02:20:35 -08004120 if (*balance == 0)
Siddha, Suresh B783609c2006-12-10 02:20:33 -08004121 goto out_balanced;
Siddha, Suresh B783609c2006-12-10 02:20:33 -08004122
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123 if (!group) {
4124 schedstat_inc(sd, lb_nobusyg[idle]);
4125 goto out_balanced;
4126 }
4127
Mike Travis7c16ec52008-04-04 18:11:11 -07004128 busiest = find_busiest_queue(group, idle, imbalance, cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129 if (!busiest) {
4130 schedstat_inc(sd, lb_nobusyq[idle]);
4131 goto out_balanced;
4132 }
4133
Nick Piggindb935db2005-06-25 14:57:11 -07004134 BUG_ON(busiest == this_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135
4136 schedstat_add(sd, lb_imbalance[idle], imbalance);
4137
Peter Williams43010652007-08-09 11:16:46 +02004138 ld_moved = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139 if (busiest->nr_running > 1) {
4140 /*
4141 * Attempt to move tasks. If find_busiest_group has found
4142 * an imbalance but busiest->nr_running <= 1, the group is
Peter Williams43010652007-08-09 11:16:46 +02004143 * still unbalanced. ld_moved simply stays zero, so it is
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144 * correctly treated as an imbalance.
4145 */
Christoph Lameterfe2eea32006-12-10 02:20:21 -08004146 local_irq_save(flags);
Nick Piggine17224b2005-09-10 00:26:18 -07004147 double_rq_lock(this_rq, busiest);
Peter Williams43010652007-08-09 11:16:46 +02004148 ld_moved = move_tasks(this_rq, this_cpu, busiest,
Ingo Molnar48f24c42006-07-03 00:25:40 -07004149 imbalance, sd, idle, &all_pinned);
Nick Piggine17224b2005-09-10 00:26:18 -07004150 double_rq_unlock(this_rq, busiest);
Christoph Lameterfe2eea32006-12-10 02:20:21 -08004151 local_irq_restore(flags);
Nick Piggin81026792005-06-25 14:57:07 -07004152
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004153 /*
4154 * some other cpu did the load balance for us.
4155 */
Peter Williams43010652007-08-09 11:16:46 +02004156 if (ld_moved && this_cpu != smp_processor_id())
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004157 resched_cpu(this_cpu);
4158
Nick Piggin81026792005-06-25 14:57:07 -07004159 /* All tasks on this runqueue were pinned by CPU affinity */
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004160 if (unlikely(all_pinned)) {
Rusty Russell96f874e2008-11-25 02:35:14 +10304161 cpumask_clear_cpu(cpu_of(busiest), cpus);
4162 if (!cpumask_empty(cpus))
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004163 goto redo;
Nick Piggin81026792005-06-25 14:57:07 -07004164 goto out_balanced;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166 }
Nick Piggin81026792005-06-25 14:57:07 -07004167
Peter Williams43010652007-08-09 11:16:46 +02004168 if (!ld_moved) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169 schedstat_inc(sd, lb_failed[idle]);
4170 sd->nr_balance_failed++;
4171
4172 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173
Christoph Lameterfe2eea32006-12-10 02:20:21 -08004174 spin_lock_irqsave(&busiest->lock, flags);
Siddha, Suresh Bfa3b6dd2005-09-10 00:26:21 -07004175
4176 /* don't kick the migration_thread, if the curr
4177 * task on busiest cpu can't be moved to this_cpu
4178 */
Rusty Russell96f874e2008-11-25 02:35:14 +10304179 if (!cpumask_test_cpu(this_cpu,
4180 &busiest->curr->cpus_allowed)) {
Christoph Lameterfe2eea32006-12-10 02:20:21 -08004181 spin_unlock_irqrestore(&busiest->lock, flags);
Siddha, Suresh Bfa3b6dd2005-09-10 00:26:21 -07004182 all_pinned = 1;
4183 goto out_one_pinned;
4184 }
4185
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 if (!busiest->active_balance) {
4187 busiest->active_balance = 1;
4188 busiest->push_cpu = this_cpu;
Nick Piggin81026792005-06-25 14:57:07 -07004189 active_balance = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190 }
Christoph Lameterfe2eea32006-12-10 02:20:21 -08004191 spin_unlock_irqrestore(&busiest->lock, flags);
Nick Piggin81026792005-06-25 14:57:07 -07004192 if (active_balance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 wake_up_process(busiest->migration_thread);
4194
4195 /*
4196 * We've kicked active balancing, reset the failure
4197 * counter.
4198 */
Nick Piggin39507452005-06-25 14:57:09 -07004199 sd->nr_balance_failed = sd->cache_nice_tries+1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200 }
Nick Piggin81026792005-06-25 14:57:07 -07004201 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 sd->nr_balance_failed = 0;
4203
Nick Piggin81026792005-06-25 14:57:07 -07004204 if (likely(!active_balance)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205 /* We were unbalanced, so reset the balancing interval */
4206 sd->balance_interval = sd->min_interval;
Nick Piggin81026792005-06-25 14:57:07 -07004207 } else {
4208 /*
4209 * If we've begun active balancing, start to back off. This
4210 * case may not be covered by the all_pinned logic if there
4211 * is only 1 task on the busy runqueue (because we don't call
4212 * move_tasks).
4213 */
4214 if (sd->balance_interval < sd->max_interval)
4215 sd->balance_interval *= 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216 }
4217
Peter Williams43010652007-08-09 11:16:46 +02004218 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004219 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004220 ld_moved = -1;
4221
4222 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223
4224out_balanced:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225 schedstat_inc(sd, lb_balanced[idle]);
4226
Nick Piggin16cfb1c2005-06-25 14:57:08 -07004227 sd->nr_balance_failed = 0;
Siddha, Suresh Bfa3b6dd2005-09-10 00:26:21 -07004228
4229out_one_pinned:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230 /* tune up the balancing interval */
Nick Piggin77391d72005-06-25 14:57:30 -07004231 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
4232 (sd->balance_interval < sd->max_interval))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233 sd->balance_interval *= 2;
4234
Ingo Molnar48f24c42006-07-03 00:25:40 -07004235 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004236 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004237 ld_moved = -1;
4238 else
4239 ld_moved = 0;
4240out:
Peter Zijlstrac8cba852008-06-27 13:41:23 +02004241 if (ld_moved)
4242 update_shares(sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004243 return ld_moved;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244}
4245
4246/*
4247 * Check this_cpu to ensure it is balanced within domain. Attempt to move
4248 * tasks if there is an imbalance.
4249 *
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004250 * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 * this_rq is locked.
4252 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07004253static int
Rusty Russelldf7c8e82009-03-19 15:22:20 +10304254load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255{
4256 struct sched_group *group;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004257 struct rq *busiest = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258 unsigned long imbalance;
Peter Williams43010652007-08-09 11:16:46 +02004259 int ld_moved = 0;
Nick Piggin5969fe02005-09-10 00:26:19 -07004260 int sd_idle = 0;
Suresh Siddha969bb4e2007-07-19 21:28:35 +02004261 int all_pinned = 0;
Rusty Russelldf7c8e82009-03-19 15:22:20 +10304262 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
Mike Travis7c16ec52008-04-04 18:11:11 -07004263
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01004264 cpumask_copy(cpus, cpu_online_mask);
Nick Piggin5969fe02005-09-10 00:26:19 -07004265
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004266 /*
4267 * When power savings policy is enabled for the parent domain, idle
4268 * sibling can pick up load irrespective of busy siblings. In this case,
4269 * let the state of idle sibling percolate up as IDLE, instead of
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004270 * portraying it as CPU_NOT_IDLE.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004271 */
4272 if (sd->flags & SD_SHARE_CPUPOWER &&
4273 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07004274 sd_idle = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275
Ingo Molnar2d723762007-10-15 17:00:12 +02004276 schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004277redo:
Peter Zijlstra3e5459b2008-06-27 13:41:24 +02004278 update_shares_locked(this_rq, sd);
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004279 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
Mike Travis7c16ec52008-04-04 18:11:11 -07004280 &sd_idle, cpus, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281 if (!group) {
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004282 schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
Nick Piggin16cfb1c2005-06-25 14:57:08 -07004283 goto out_balanced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284 }
4285
Mike Travis7c16ec52008-04-04 18:11:11 -07004286 busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
Nick Piggindb935db2005-06-25 14:57:11 -07004287 if (!busiest) {
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004288 schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
Nick Piggin16cfb1c2005-06-25 14:57:08 -07004289 goto out_balanced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290 }
4291
Nick Piggindb935db2005-06-25 14:57:11 -07004292 BUG_ON(busiest == this_rq);
4293
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004294 schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
Nick Piggind6d5cfa2005-09-10 00:26:16 -07004295
Peter Williams43010652007-08-09 11:16:46 +02004296 ld_moved = 0;
Nick Piggind6d5cfa2005-09-10 00:26:16 -07004297 if (busiest->nr_running > 1) {
4298 /* Attempt to move tasks */
4299 double_lock_balance(this_rq, busiest);
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02004300 /* this_rq->clock is already updated */
4301 update_rq_clock(busiest);
Peter Williams43010652007-08-09 11:16:46 +02004302 ld_moved = move_tasks(this_rq, this_cpu, busiest,
Suresh Siddha969bb4e2007-07-19 21:28:35 +02004303 imbalance, sd, CPU_NEWLY_IDLE,
4304 &all_pinned);
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02004305 double_unlock_balance(this_rq, busiest);
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004306
Suresh Siddha969bb4e2007-07-19 21:28:35 +02004307 if (unlikely(all_pinned)) {
Rusty Russell96f874e2008-11-25 02:35:14 +10304308 cpumask_clear_cpu(cpu_of(busiest), cpus);
4309 if (!cpumask_empty(cpus))
Christoph Lameter0a2966b2006-09-25 23:30:51 -07004310 goto redo;
4311 }
Nick Piggind6d5cfa2005-09-10 00:26:16 -07004312 }
4313
Peter Williams43010652007-08-09 11:16:46 +02004314 if (!ld_moved) {
Vaidyanathan Srinivasan36dffab2008-12-20 10:06:38 +05304315 int active_balance = 0;
Vaidyanathan Srinivasanad273b32008-12-18 23:26:36 +05304316
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004317 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004318 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
4319 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07004320 return -1;
Vaidyanathan Srinivasanad273b32008-12-18 23:26:36 +05304321
4322 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
4323 return -1;
4324
4325 if (sd->nr_balance_failed++ < 2)
4326 return -1;
4327
4328 /*
4329 * The only task running in a non-idle cpu can be moved to this
4330 * cpu in an attempt to completely freeup the other CPU
4331 * package. The same method used to move task in load_balance()
4332 * have been extended for load_balance_newidle() to speedup
4333 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
4334 *
4335 * The package power saving logic comes from
4336 * find_busiest_group(). If there are no imbalance, then
4337 * f_b_g() will return NULL. However when sched_mc={1,2} then
4338 * f_b_g() will select a group from which a running task may be
4339 * pulled to this cpu in order to make the other package idle.
4340 * If there is no opportunity to make a package idle and if
4341 * there are no imbalance, then f_b_g() will return NULL and no
4342 * action will be taken in load_balance_newidle().
4343 *
4344 * Under normal task pull operation due to imbalance, there
4345 * will be more than one task in the source run queue and
4346 * move_tasks() will succeed. ld_moved will be true and this
4347 * active balance code will not be triggered.
4348 */
4349
4350 /* Lock busiest in correct order while this_rq is held */
4351 double_lock_balance(this_rq, busiest);
4352
4353 /*
4354 * don't kick the migration_thread, if the curr
4355 * task on busiest cpu can't be moved to this_cpu
4356 */
Mike Travis6ca09df2008-12-31 18:08:45 -08004357 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
Vaidyanathan Srinivasanad273b32008-12-18 23:26:36 +05304358 double_unlock_balance(this_rq, busiest);
4359 all_pinned = 1;
4360 return ld_moved;
4361 }
4362
4363 if (!busiest->active_balance) {
4364 busiest->active_balance = 1;
4365 busiest->push_cpu = this_cpu;
4366 active_balance = 1;
4367 }
4368
4369 double_unlock_balance(this_rq, busiest);
Peter Zijlstrada8d5082009-01-07 15:28:57 +01004370 /*
4371 * Should not call ttwu while holding a rq->lock
4372 */
4373 spin_unlock(&this_rq->lock);
Vaidyanathan Srinivasanad273b32008-12-18 23:26:36 +05304374 if (active_balance)
4375 wake_up_process(busiest->migration_thread);
Peter Zijlstrada8d5082009-01-07 15:28:57 +01004376 spin_lock(&this_rq->lock);
Vaidyanathan Srinivasanad273b32008-12-18 23:26:36 +05304377
Nick Piggin5969fe02005-09-10 00:26:19 -07004378 } else
Nick Piggin16cfb1c2005-06-25 14:57:08 -07004379 sd->nr_balance_failed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380
Peter Zijlstra3e5459b2008-06-27 13:41:24 +02004381 update_shares_locked(this_rq, sd);
Peter Williams43010652007-08-09 11:16:46 +02004382 return ld_moved;
Nick Piggin16cfb1c2005-06-25 14:57:08 -07004383
4384out_balanced:
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004385 schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
Ingo Molnar48f24c42006-07-03 00:25:40 -07004386 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
Siddha, Suresh B89c47102006-10-03 01:14:09 -07004387 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07004388 return -1;
Nick Piggin16cfb1c2005-06-25 14:57:08 -07004389 sd->nr_balance_failed = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004390
Nick Piggin16cfb1c2005-06-25 14:57:08 -07004391 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004392}
4393
4394/*
4395 * idle_balance is called by schedule() if this_cpu is about to become
4396 * idle. Attempts to pull tasks from other CPUs.
4397 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07004398static void idle_balance(int this_cpu, struct rq *this_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399{
4400 struct sched_domain *sd;
Vaidyanathan Srinivasanefbe0272008-12-08 20:52:49 +05304401 int pulled_task = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02004402 unsigned long next_balance = jiffies + HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403
Mike Galbraith1b9508f2009-11-04 17:53:50 +01004404 this_rq->idle_stamp = this_rq->clock;
4405
4406 if (this_rq->avg_idle < sysctl_sched_migration_cost)
4407 return;
4408
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409 for_each_domain(this_cpu, sd) {
Christoph Lameter92c4ca52007-06-23 17:16:33 -07004410 unsigned long interval;
4411
4412 if (!(sd->flags & SD_LOAD_BALANCE))
4413 continue;
4414
4415 if (sd->flags & SD_BALANCE_NEWIDLE)
Ingo Molnar48f24c42006-07-03 00:25:40 -07004416 /* If we've pulled tasks over stop searching: */
Mike Travis7c16ec52008-04-04 18:11:11 -07004417 pulled_task = load_balance_newidle(this_cpu, this_rq,
Rusty Russelldf7c8e82009-03-19 15:22:20 +10304418 sd);
Christoph Lameter92c4ca52007-06-23 17:16:33 -07004419
4420 interval = msecs_to_jiffies(sd->balance_interval);
4421 if (time_after(next_balance, sd->last_balance + interval))
4422 next_balance = sd->last_balance + interval;
Mike Galbraith1b9508f2009-11-04 17:53:50 +01004423 if (pulled_task) {
4424 this_rq->idle_stamp = 0;
Christoph Lameter92c4ca52007-06-23 17:16:33 -07004425 break;
Mike Galbraith1b9508f2009-11-04 17:53:50 +01004426 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004428 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
Christoph Lameter1bd77f22006-12-10 02:20:27 -08004429 /*
4430 * We are going idle. next_balance may be set based on
4431 * a busy processor. So reset next_balance.
4432 */
4433 this_rq->next_balance = next_balance;
Ingo Molnardd41f592007-07-09 18:51:59 +02004434 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435}
4436
4437/*
4438 * active_load_balance is run by migration threads. It pushes running tasks
4439 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
4440 * running on each physical CPU where possible, and avoids physical /
4441 * logical imbalances.
4442 *
4443 * Called with busiest_rq locked.
4444 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07004445static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004446{
Nick Piggin39507452005-06-25 14:57:09 -07004447 int target_cpu = busiest_rq->push_cpu;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004448 struct sched_domain *sd;
4449 struct rq *target_rq;
Nick Piggin39507452005-06-25 14:57:09 -07004450
Ingo Molnar48f24c42006-07-03 00:25:40 -07004451 /* Is there any task to move? */
Nick Piggin39507452005-06-25 14:57:09 -07004452 if (busiest_rq->nr_running <= 1)
Nick Piggin39507452005-06-25 14:57:09 -07004453 return;
4454
4455 target_rq = cpu_rq(target_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456
4457 /*
Nick Piggin39507452005-06-25 14:57:09 -07004458 * This condition is "impossible", if it occurs
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004459 * we need to fix it. Originally reported by
Nick Piggin39507452005-06-25 14:57:09 -07004460 * Bjorn Helgaas on a 128-cpu setup.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461 */
Nick Piggin39507452005-06-25 14:57:09 -07004462 BUG_ON(busiest_rq == target_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463
Nick Piggin39507452005-06-25 14:57:09 -07004464 /* move a task from busiest_rq to target_rq */
4465 double_lock_balance(busiest_rq, target_rq);
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02004466 update_rq_clock(busiest_rq);
4467 update_rq_clock(target_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468
Nick Piggin39507452005-06-25 14:57:09 -07004469 /* Search for an sd spanning us and the target CPU. */
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07004470 for_each_domain(target_cpu, sd) {
Nick Piggin39507452005-06-25 14:57:09 -07004471 if ((sd->flags & SD_LOAD_BALANCE) &&
Rusty Russell758b2cd2008-11-25 02:35:04 +10304472 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
Nick Piggin39507452005-06-25 14:57:09 -07004473 break;
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07004474 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475
Ingo Molnar48f24c42006-07-03 00:25:40 -07004476 if (likely(sd)) {
Ingo Molnar2d723762007-10-15 17:00:12 +02004477 schedstat_inc(sd, alb_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478
Peter Williams43010652007-08-09 11:16:46 +02004479 if (move_one_task(target_rq, target_cpu, busiest_rq,
4480 sd, CPU_IDLE))
Ingo Molnar48f24c42006-07-03 00:25:40 -07004481 schedstat_inc(sd, alb_pushed);
4482 else
4483 schedstat_inc(sd, alb_failed);
4484 }
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02004485 double_unlock_balance(busiest_rq, target_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486}
4487
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004488#ifdef CONFIG_NO_HZ
4489static struct {
4490 atomic_t load_balancer;
Rusty Russell7d1e6a92008-11-25 02:35:09 +10304491 cpumask_var_t cpu_mask;
Gautham R Shenoyf711f602009-04-14 10:25:30 +05304492 cpumask_var_t ilb_grp_nohz_mask;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004493} nohz ____cacheline_aligned = {
4494 .load_balancer = ATOMIC_INIT(-1),
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004495};
4496
Arun R Bharadwajeea08f32009-04-16 12:16:41 +05304497int get_nohz_load_balancer(void)
4498{
4499 return atomic_read(&nohz.load_balancer);
4500}
4501
Gautham R Shenoyf711f602009-04-14 10:25:30 +05304502#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
4503/**
4504 * lowest_flag_domain - Return lowest sched_domain containing flag.
4505 * @cpu: The cpu whose lowest level of sched domain is to
4506 * be returned.
4507 * @flag: The flag to check for the lowest sched_domain
4508 * for the given cpu.
4509 *
4510 * Returns the lowest sched_domain of a cpu which contains the given flag.
4511 */
4512static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
4513{
4514 struct sched_domain *sd;
4515
4516 for_each_domain(cpu, sd)
4517 if (sd && (sd->flags & flag))
4518 break;
4519
4520 return sd;
4521}
4522
4523/**
4524 * for_each_flag_domain - Iterates over sched_domains containing the flag.
4525 * @cpu: The cpu whose domains we're iterating over.
4526 * @sd: variable holding the value of the power_savings_sd
4527 * for cpu.
4528 * @flag: The flag to filter the sched_domains to be iterated.
4529 *
4530 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
4531 * set, starting from the lowest sched_domain to the highest.
4532 */
4533#define for_each_flag_domain(cpu, sd, flag) \
4534 for (sd = lowest_flag_domain(cpu, flag); \
4535 (sd && (sd->flags & flag)); sd = sd->parent)
4536
4537/**
4538 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
4539 * @ilb_group: group to be checked for semi-idleness
4540 *
4541 * Returns: 1 if the group is semi-idle. 0 otherwise.
4542 *
4543 * We define a sched_group to be semi idle if it has atleast one idle-CPU
4544 * and atleast one non-idle CPU. This helper function checks if the given
4545 * sched_group is semi-idle or not.
4546 */
4547static inline int is_semi_idle_group(struct sched_group *ilb_group)
4548{
4549 cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
4550 sched_group_cpus(ilb_group));
4551
4552 /*
4553 * A sched_group is semi-idle when it has atleast one busy cpu
4554 * and atleast one idle cpu.
4555 */
4556 if (cpumask_empty(nohz.ilb_grp_nohz_mask))
4557 return 0;
4558
4559 if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
4560 return 0;
4561
4562 return 1;
4563}
4564/**
4565 * find_new_ilb - Finds the optimum idle load balancer for nomination.
4566 * @cpu: The cpu which is nominating a new idle_load_balancer.
4567 *
4568 * Returns: Returns the id of the idle load balancer if it exists,
4569 * Else, returns >= nr_cpu_ids.
4570 *
4571 * This algorithm picks the idle load balancer such that it belongs to a
4572 * semi-idle powersavings sched_domain. The idea is to try and avoid
4573 * completely idle packages/cores just for the purpose of idle load balancing
4574 * when there are other idle cpu's which are better suited for that job.
4575 */
4576static int find_new_ilb(int cpu)
4577{
4578 struct sched_domain *sd;
4579 struct sched_group *ilb_group;
4580
4581 /*
4582 * Have idle load balancer selection from semi-idle packages only
4583 * when power-aware load balancing is enabled
4584 */
4585 if (!(sched_smt_power_savings || sched_mc_power_savings))
4586 goto out_done;
4587
4588 /*
4589 * Optimize for the case when we have no idle CPUs or only one
4590 * idle CPU. Don't walk the sched_domain hierarchy in such cases
4591 */
4592 if (cpumask_weight(nohz.cpu_mask) < 2)
4593 goto out_done;
4594
4595 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
4596 ilb_group = sd->groups;
4597
4598 do {
4599 if (is_semi_idle_group(ilb_group))
4600 return cpumask_first(nohz.ilb_grp_nohz_mask);
4601
4602 ilb_group = ilb_group->next;
4603
4604 } while (ilb_group != sd->groups);
4605 }
4606
4607out_done:
4608 return cpumask_first(nohz.cpu_mask);
4609}
4610#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
4611static inline int find_new_ilb(int call_cpu)
4612{
Gautham R Shenoy6e29ec52009-04-21 08:40:49 +05304613 return cpumask_first(nohz.cpu_mask);
Gautham R Shenoyf711f602009-04-14 10:25:30 +05304614}
4615#endif
4616
Christoph Lameter7835b982006-12-10 02:20:22 -08004617/*
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004618 * This routine will try to nominate the ilb (idle load balancing)
4619 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
4620 * load balancing on behalf of all those cpus. If all the cpus in the system
4621 * go into this tickless mode, then there will be no ilb owner (as there is
4622 * no need for one) and all the cpus will sleep till the next wakeup event
4623 * arrives...
Christoph Lameter7835b982006-12-10 02:20:22 -08004624 *
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004625 * For the ilb owner, tick is not stopped. And this tick will be used
4626 * for idle load balancing. ilb owner will still be part of
4627 * nohz.cpu_mask..
4628 *
4629 * While stopping the tick, this cpu will become the ilb owner if there
4630 * is no other owner. And will be the owner till that cpu becomes busy
4631 * or if all cpus in the system stop their ticks at which point
4632 * there is no need for ilb owner.
4633 *
4634 * When the ilb owner becomes busy, it nominates another owner, during the
4635 * next busy scheduler_tick()
4636 */
4637int select_nohz_load_balancer(int stop_tick)
4638{
4639 int cpu = smp_processor_id();
4640
4641 if (stop_tick) {
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004642 cpu_rq(cpu)->in_nohz_recently = 1;
4643
Suresh Siddha483b4ee2009-02-04 11:59:44 -08004644 if (!cpu_active(cpu)) {
4645 if (atomic_read(&nohz.load_balancer) != cpu)
4646 return 0;
4647
4648 /*
4649 * If we are going offline and still the leader,
4650 * give up!
4651 */
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004652 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4653 BUG();
Suresh Siddha483b4ee2009-02-04 11:59:44 -08004654
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004655 return 0;
4656 }
4657
Suresh Siddha483b4ee2009-02-04 11:59:44 -08004658 cpumask_set_cpu(cpu, nohz.cpu_mask);
4659
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004660 /* time for ilb owner also to sleep */
Rusty Russell7d1e6a92008-11-25 02:35:09 +10304661 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004662 if (atomic_read(&nohz.load_balancer) == cpu)
4663 atomic_set(&nohz.load_balancer, -1);
4664 return 0;
4665 }
4666
4667 if (atomic_read(&nohz.load_balancer) == -1) {
4668 /* make me the ilb owner */
4669 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
4670 return 1;
Gautham R Shenoye790fb02009-04-14 10:25:35 +05304671 } else if (atomic_read(&nohz.load_balancer) == cpu) {
4672 int new_ilb;
4673
4674 if (!(sched_smt_power_savings ||
4675 sched_mc_power_savings))
4676 return 1;
4677 /*
4678 * Check to see if there is a more power-efficient
4679 * ilb.
4680 */
4681 new_ilb = find_new_ilb(cpu);
4682 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
4683 atomic_set(&nohz.load_balancer, -1);
4684 resched_cpu(new_ilb);
4685 return 0;
4686 }
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004687 return 1;
Gautham R Shenoye790fb02009-04-14 10:25:35 +05304688 }
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004689 } else {
Rusty Russell7d1e6a92008-11-25 02:35:09 +10304690 if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004691 return 0;
4692
Rusty Russell7d1e6a92008-11-25 02:35:09 +10304693 cpumask_clear_cpu(cpu, nohz.cpu_mask);
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004694
4695 if (atomic_read(&nohz.load_balancer) == cpu)
4696 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4697 BUG();
4698 }
4699 return 0;
4700}
4701#endif
4702
4703static DEFINE_SPINLOCK(balancing);
4704
4705/*
Christoph Lameter7835b982006-12-10 02:20:22 -08004706 * It checks each scheduling domain to see if it is due to be balanced,
4707 * and initiates a balancing operation if so.
4708 *
4709 * Balancing parameters are set up in arch_init_sched_domains.
4710 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02004711static void rebalance_domains(int cpu, enum cpu_idle_type idle)
Christoph Lameter7835b982006-12-10 02:20:22 -08004712{
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004713 int balance = 1;
4714 struct rq *rq = cpu_rq(cpu);
Christoph Lameter7835b982006-12-10 02:20:22 -08004715 unsigned long interval;
4716 struct sched_domain *sd;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004717 /* Earliest time when we have to do rebalance again */
Christoph Lameterc9819f42006-12-10 02:20:25 -08004718 unsigned long next_balance = jiffies + 60*HZ;
Suresh Siddhaf549da82007-08-23 15:18:02 +02004719 int update_next_balance = 0;
Dmitry Adamushkod07355f2008-05-12 21:21:15 +02004720 int need_serialize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004722 for_each_domain(cpu, sd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723 if (!(sd->flags & SD_LOAD_BALANCE))
4724 continue;
4725
4726 interval = sd->balance_interval;
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004727 if (idle != CPU_IDLE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004728 interval *= sd->busy_factor;
4729
4730 /* scale ms to jiffies */
4731 interval = msecs_to_jiffies(interval);
4732 if (unlikely(!interval))
4733 interval = 1;
Ingo Molnardd41f592007-07-09 18:51:59 +02004734 if (interval > HZ*NR_CPUS/10)
4735 interval = HZ*NR_CPUS/10;
4736
Dmitry Adamushkod07355f2008-05-12 21:21:15 +02004737 need_serialize = sd->flags & SD_SERIALIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738
Dmitry Adamushkod07355f2008-05-12 21:21:15 +02004739 if (need_serialize) {
Christoph Lameter08c183f2006-12-10 02:20:29 -08004740 if (!spin_trylock(&balancing))
4741 goto out;
4742 }
4743
Christoph Lameterc9819f42006-12-10 02:20:25 -08004744 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Rusty Russelldf7c8e82009-03-19 15:22:20 +10304745 if (load_balance(cpu, rq, sd, idle, &balance)) {
Siddha, Suresh Bfa3b6dd2005-09-10 00:26:21 -07004746 /*
4747 * We've pulled tasks over so either we're no
Nick Piggin5969fe02005-09-10 00:26:19 -07004748 * longer idle, or one of our SMT siblings is
4749 * not idle.
4750 */
Ingo Molnard15bcfd2007-07-09 18:51:57 +02004751 idle = CPU_NOT_IDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752 }
Christoph Lameter1bd77f22006-12-10 02:20:27 -08004753 sd->last_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754 }
Dmitry Adamushkod07355f2008-05-12 21:21:15 +02004755 if (need_serialize)
Christoph Lameter08c183f2006-12-10 02:20:29 -08004756 spin_unlock(&balancing);
4757out:
Suresh Siddhaf549da82007-08-23 15:18:02 +02004758 if (time_after(next_balance, sd->last_balance + interval)) {
Christoph Lameterc9819f42006-12-10 02:20:25 -08004759 next_balance = sd->last_balance + interval;
Suresh Siddhaf549da82007-08-23 15:18:02 +02004760 update_next_balance = 1;
4761 }
Siddha, Suresh B783609c2006-12-10 02:20:33 -08004762
4763 /*
4764 * Stop the load balance at this level. There is another
4765 * CPU in our sched group which is doing load balancing more
4766 * actively.
4767 */
4768 if (!balance)
4769 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770 }
Suresh Siddhaf549da82007-08-23 15:18:02 +02004771
4772 /*
4773 * next_balance will be updated only when there is a need.
4774 * When the cpu is attached to null domain for ex, it will not be
4775 * updated.
4776 */
4777 if (likely(update_next_balance))
4778 rq->next_balance = next_balance;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004779}
4780
4781/*
4782 * run_rebalance_domains is triggered when needed from the scheduler tick.
4783 * In CONFIG_NO_HZ case, the idle load balance owner will do the
4784 * rebalancing for all the cpus for whom scheduler ticks are stopped.
4785 */
4786static void run_rebalance_domains(struct softirq_action *h)
4787{
Ingo Molnardd41f592007-07-09 18:51:59 +02004788 int this_cpu = smp_processor_id();
4789 struct rq *this_rq = cpu_rq(this_cpu);
4790 enum cpu_idle_type idle = this_rq->idle_at_tick ?
4791 CPU_IDLE : CPU_NOT_IDLE;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004792
Ingo Molnardd41f592007-07-09 18:51:59 +02004793 rebalance_domains(this_cpu, idle);
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004794
4795#ifdef CONFIG_NO_HZ
4796 /*
4797 * If this cpu is the owner for idle load balancing, then do the
4798 * balancing on behalf of the other idle cpus whose ticks are
4799 * stopped.
4800 */
Ingo Molnardd41f592007-07-09 18:51:59 +02004801 if (this_rq->idle_at_tick &&
4802 atomic_read(&nohz.load_balancer) == this_cpu) {
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004803 struct rq *rq;
4804 int balance_cpu;
4805
Rusty Russell7d1e6a92008-11-25 02:35:09 +10304806 for_each_cpu(balance_cpu, nohz.cpu_mask) {
4807 if (balance_cpu == this_cpu)
4808 continue;
4809
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004810 /*
4811 * If this cpu gets work to do, stop the load balancing
4812 * work being done for other cpus. Next load
4813 * balancing owner will pick it up.
4814 */
4815 if (need_resched())
4816 break;
4817
Oleg Nesterovde0cf892007-08-12 18:08:19 +02004818 rebalance_domains(balance_cpu, CPU_IDLE);
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004819
4820 rq = cpu_rq(balance_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02004821 if (time_after(this_rq->next_balance, rq->next_balance))
4822 this_rq->next_balance = rq->next_balance;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004823 }
4824 }
4825#endif
4826}
4827
Frederic Weisbecker8a0be9e2009-03-05 01:27:02 +01004828static inline int on_null_domain(int cpu)
4829{
4830 return !rcu_dereference(cpu_rq(cpu)->sd);
4831}
4832
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004833/*
4834 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4835 *
4836 * In case of CONFIG_NO_HZ, this is the place where we nominate a new
4837 * idle load balancing owner or decide to stop the periodic load balancing,
4838 * if the whole system is idle.
4839 */
Ingo Molnardd41f592007-07-09 18:51:59 +02004840static inline void trigger_load_balance(struct rq *rq, int cpu)
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004841{
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004842#ifdef CONFIG_NO_HZ
4843 /*
4844 * If we were in the nohz mode recently and busy at the current
4845 * scheduler tick, then check if we need to nominate new idle
4846 * load balancer.
4847 */
4848 if (rq->in_nohz_recently && !rq->idle_at_tick) {
4849 rq->in_nohz_recently = 0;
4850
4851 if (atomic_read(&nohz.load_balancer) == cpu) {
Rusty Russell7d1e6a92008-11-25 02:35:09 +10304852 cpumask_clear_cpu(cpu, nohz.cpu_mask);
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004853 atomic_set(&nohz.load_balancer, -1);
4854 }
4855
4856 if (atomic_read(&nohz.load_balancer) == -1) {
Gautham R Shenoyf711f602009-04-14 10:25:30 +05304857 int ilb = find_new_ilb(cpu);
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004858
Mike Travis434d53b2008-04-04 18:11:04 -07004859 if (ilb < nr_cpu_ids)
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004860 resched_cpu(ilb);
4861 }
4862 }
4863
4864 /*
4865 * If this cpu is idle and doing idle load balancing for all the
4866 * cpus with ticks stopped, is it time for that to stop?
4867 */
4868 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
Rusty Russell7d1e6a92008-11-25 02:35:09 +10304869 cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004870 resched_cpu(cpu);
4871 return;
4872 }
4873
4874 /*
4875 * If this cpu is idle and the idle load balancing is done by
4876 * someone else, then no need raise the SCHED_SOFTIRQ
4877 */
4878 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
Rusty Russell7d1e6a92008-11-25 02:35:09 +10304879 cpumask_test_cpu(cpu, nohz.cpu_mask))
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004880 return;
4881#endif
Frederic Weisbecker8a0be9e2009-03-05 01:27:02 +01004882 /* Don't need to rebalance while attached to NULL domain */
4883 if (time_after_eq(jiffies, rq->next_balance) &&
4884 likely(!on_null_domain(cpu)))
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07004885 raise_softirq(SCHED_SOFTIRQ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886}
Ingo Molnardd41f592007-07-09 18:51:59 +02004887
4888#else /* CONFIG_SMP */
4889
Linus Torvalds1da177e2005-04-16 15:20:36 -07004890/*
4891 * on UP we do not need to balance between CPUs:
4892 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07004893static inline void idle_balance(int cpu, struct rq *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004894{
4895}
Ingo Molnardd41f592007-07-09 18:51:59 +02004896
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897#endif
4898
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899DEFINE_PER_CPU(struct kernel_stat, kstat);
4900
4901EXPORT_PER_CPU_SYMBOL(kstat);
4902
4903/*
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09004904 * Return any ns on the sched_clock that have not yet been accounted in
Frank Mayharf06febc2008-09-12 09:54:39 -07004905 * @p in case that task is currently running.
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09004906 *
4907 * Called with task_rq_lock() held on @rq.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908 */
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09004909static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
4910{
4911 u64 ns = 0;
4912
4913 if (task_current(rq, p)) {
4914 update_rq_clock(rq);
4915 ns = rq->clock - p->se.exec_start;
4916 if ((s64)ns < 0)
4917 ns = 0;
4918 }
4919
4920 return ns;
4921}
4922
Frank Mayharbb34d922008-09-12 09:54:39 -07004923unsigned long long task_delta_exec(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925 unsigned long flags;
Ingo Molnar41b86e92007-07-09 18:51:58 +02004926 struct rq *rq;
Frank Mayharbb34d922008-09-12 09:54:39 -07004927 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004928
Ingo Molnar41b86e92007-07-09 18:51:58 +02004929 rq = task_rq_lock(p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09004930 ns = do_task_delta_exec(p, rq);
4931 task_rq_unlock(rq, &flags);
Ingo Molnar15084872008-09-30 08:28:17 +02004932
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09004933 return ns;
4934}
Frank Mayharf06febc2008-09-12 09:54:39 -07004935
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09004936/*
4937 * Return accounted runtime for the task.
4938 * In case the task is currently running, return the runtime plus current's
4939 * pending runtime that have not been accounted yet.
4940 */
4941unsigned long long task_sched_runtime(struct task_struct *p)
4942{
4943 unsigned long flags;
4944 struct rq *rq;
4945 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004946
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09004947 rq = task_rq_lock(p, &flags);
4948 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
4949 task_rq_unlock(rq, &flags);
4950
4951 return ns;
4952}
4953
4954/*
4955 * Return sum_exec_runtime for the thread group.
4956 * In case the task is currently running, return the sum plus current's
4957 * pending runtime that have not been accounted yet.
4958 *
4959 * Note that the thread group might have other running tasks as well,
4960 * so the return value not includes other pending runtime that other
4961 * running tasks might have.
4962 */
4963unsigned long long thread_group_sched_runtime(struct task_struct *p)
4964{
4965 struct task_cputime totals;
4966 unsigned long flags;
4967 struct rq *rq;
4968 u64 ns;
4969
4970 rq = task_rq_lock(p, &flags);
4971 thread_group_cputime(p, &totals);
4972 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004973 task_rq_unlock(rq, &flags);
4974
4975 return ns;
4976}
4977
4978/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979 * Account user cpu time to a process.
4980 * @p: the process that the cpu time gets accounted to
Linus Torvalds1da177e2005-04-16 15:20:36 -07004981 * @cputime: the cpu time spent in user space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01004982 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01004984void account_user_time(struct task_struct *p, cputime_t cputime,
4985 cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986{
4987 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4988 cputime64_t tmp;
4989
Martin Schwidefsky457533a2008-12-31 15:11:37 +01004990 /* Add user time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01004992 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07004993 account_group_user_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994
4995 /* Add user time to cpustat. */
4996 tmp = cputime_to_cputime64(cputime);
4997 if (TASK_NICE(p) > 0)
4998 cpustat->nice = cputime64_add(cpustat->nice, tmp);
4999 else
5000 cpustat->user = cputime64_add(cpustat->user, tmp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05305001
5002 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
Jonathan Lim49b5cf32008-07-25 01:48:40 -07005003 /* Account for user time used */
5004 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005005}
5006
5007/*
Laurent Vivier94886b82007-10-15 17:00:19 +02005008 * Account guest cpu time to a process.
5009 * @p: the process that the cpu time gets accounted to
5010 * @cputime: the cpu time spent in virtual machine since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005011 * @cputime_scaled: cputime scaled by cpu frequency
Laurent Vivier94886b82007-10-15 17:00:19 +02005012 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005013static void account_guest_time(struct task_struct *p, cputime_t cputime,
5014 cputime_t cputime_scaled)
Laurent Vivier94886b82007-10-15 17:00:19 +02005015{
5016 cputime64_t tmp;
5017 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
5018
5019 tmp = cputime_to_cputime64(cputime);
5020
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005021 /* Add guest time to process. */
Laurent Vivier94886b82007-10-15 17:00:19 +02005022 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005023 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07005024 account_group_user_time(p, cputime);
Laurent Vivier94886b82007-10-15 17:00:19 +02005025 p->gtime = cputime_add(p->gtime, cputime);
5026
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005027 /* Add guest time to cpustat. */
Ryota Ozakice0e7b22009-10-24 01:20:10 +09005028 if (TASK_NICE(p) > 0) {
5029 cpustat->nice = cputime64_add(cpustat->nice, tmp);
5030 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
5031 } else {
5032 cpustat->user = cputime64_add(cpustat->user, tmp);
5033 cpustat->guest = cputime64_add(cpustat->guest, tmp);
5034 }
Laurent Vivier94886b82007-10-15 17:00:19 +02005035}
5036
5037/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038 * Account system cpu time to a process.
5039 * @p: the process that the cpu time gets accounted to
5040 * @hardirq_offset: the offset to subtract from hardirq_count()
5041 * @cputime: the cpu time spent in kernel space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005042 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043 */
5044void account_system_time(struct task_struct *p, int hardirq_offset,
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005045 cputime_t cputime, cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046{
5047 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048 cputime64_t tmp;
5049
Harvey Harrison983ed7a2008-04-24 18:17:55 -07005050 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005051 account_guest_time(p, cputime, cputime_scaled);
Harvey Harrison983ed7a2008-04-24 18:17:55 -07005052 return;
5053 }
Laurent Vivier94886b82007-10-15 17:00:19 +02005054
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005055 /* Add system time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056 p->stime = cputime_add(p->stime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01005057 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07005058 account_group_system_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059
5060 /* Add system time to cpustat. */
5061 tmp = cputime_to_cputime64(cputime);
5062 if (hardirq_count() - hardirq_offset)
5063 cpustat->irq = cputime64_add(cpustat->irq, tmp);
5064 else if (softirq_count())
5065 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 else
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005067 cpustat->system = cputime64_add(cpustat->system, tmp);
5068
Bharata B Raoef12fef2009-03-31 10:02:22 +05305069 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
5070
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071 /* Account for system time used */
5072 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005073}
5074
5075/*
5076 * Account for involuntary wait time.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005077 * @steal: the cpu time spent in involuntary wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005079void account_steal_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005082 cputime64_t cputime64 = cputime_to_cputime64(cputime);
5083
5084 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085}
5086
Christoph Lameter7835b982006-12-10 02:20:22 -08005087/*
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005088 * Account for idle time.
5089 * @cputime: the cpu time spent in idle wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005091void account_idle_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005092{
5093 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005094 cputime64_t cputime64 = cputime_to_cputime64(cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095 struct rq *rq = this_rq();
5096
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005097 if (atomic_read(&rq->nr_iowait) > 0)
5098 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
5099 else
5100 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
Christoph Lameter7835b982006-12-10 02:20:22 -08005101}
5102
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005103#ifndef CONFIG_VIRT_CPU_ACCOUNTING
5104
5105/*
5106 * Account a single tick of cpu time.
5107 * @p: the process that the cpu time gets accounted to
5108 * @user_tick: indicates if the tick is a user or a system tick
5109 */
5110void account_process_tick(struct task_struct *p, int user_tick)
5111{
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02005112 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005113 struct rq *rq = this_rq();
5114
5115 if (user_tick)
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02005116 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
Eric Dumazetf5f293a2009-04-29 14:44:49 +02005117 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02005118 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005119 one_jiffy_scaled);
5120 else
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02005121 account_idle_time(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01005122}
5123
5124/*
5125 * Account multiple ticks of steal time.
5126 * @p: the process from which the cpu time has been stolen
5127 * @ticks: number of stolen ticks
5128 */
5129void account_steal_ticks(unsigned long ticks)
5130{
5131 account_steal_time(jiffies_to_cputime(ticks));
5132}
5133
5134/*
5135 * Account multiple ticks of idle time.
5136 * @ticks: number of stolen ticks
5137 */
5138void account_idle_ticks(unsigned long ticks)
5139{
5140 account_idle_time(jiffies_to_cputime(ticks));
5141}
5142
5143#endif
5144
Christoph Lameter7835b982006-12-10 02:20:22 -08005145/*
Balbir Singh49048622008-09-05 18:12:23 +02005146 * Use precise platform statistics if available:
5147 */
5148#ifdef CONFIG_VIRT_CPU_ACCOUNTING
5149cputime_t task_utime(struct task_struct *p)
5150{
5151 return p->utime;
5152}
5153
5154cputime_t task_stime(struct task_struct *p)
5155{
5156 return p->stime;
5157}
5158#else
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09005159
5160#ifndef nsecs_to_cputime
5161# define nsecs_to_cputime(__nsecs) \
5162 msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
5163#endif
5164
Balbir Singh49048622008-09-05 18:12:23 +02005165cputime_t task_utime(struct task_struct *p)
5166{
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09005167 cputime_t utime = p->utime, total = utime + p->stime;
Balbir Singh49048622008-09-05 18:12:23 +02005168 u64 temp;
5169
5170 /*
5171 * Use CFS's precise accounting:
5172 */
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09005173 temp = (u64)nsecs_to_cputime(p->se.sum_exec_runtime);
Balbir Singh49048622008-09-05 18:12:23 +02005174
5175 if (total) {
5176 temp *= utime;
5177 do_div(temp, total);
5178 }
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09005179 utime = (cputime_t)temp;
Balbir Singh49048622008-09-05 18:12:23 +02005180
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09005181 p->prev_utime = max(p->prev_utime, utime);
Balbir Singh49048622008-09-05 18:12:23 +02005182 return p->prev_utime;
5183}
5184
5185cputime_t task_stime(struct task_struct *p)
5186{
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09005187 cputime_t stime;
Balbir Singh49048622008-09-05 18:12:23 +02005188
5189 /*
5190 * Use CFS's precise accounting. (we subtract utime from
5191 * the total, to make sure the total observed by userspace
5192 * grows monotonically - apps rely on that):
5193 */
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09005194 stime = nsecs_to_cputime(p->se.sum_exec_runtime) - task_utime(p);
Balbir Singh49048622008-09-05 18:12:23 +02005195
5196 if (stime >= 0)
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09005197 p->prev_stime = max(p->prev_stime, stime);
Balbir Singh49048622008-09-05 18:12:23 +02005198
5199 return p->prev_stime;
5200}
5201#endif
5202
5203inline cputime_t task_gtime(struct task_struct *p)
5204{
5205 return p->gtime;
5206}
5207
5208/*
Christoph Lameter7835b982006-12-10 02:20:22 -08005209 * This function gets called by the timer code, with HZ frequency.
5210 * We call it with interrupts disabled.
5211 *
5212 * It also gets called by the fork code, when changing the parent's
5213 * timeslices.
5214 */
5215void scheduler_tick(void)
5216{
Christoph Lameter7835b982006-12-10 02:20:22 -08005217 int cpu = smp_processor_id();
5218 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005219 struct task_struct *curr = rq->curr;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02005220
5221 sched_clock_tick();
Christoph Lameter7835b982006-12-10 02:20:22 -08005222
Ingo Molnardd41f592007-07-09 18:51:59 +02005223 spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02005224 update_rq_clock(rq);
Ingo Molnarf1a438d2007-08-09 11:16:45 +02005225 update_cpu_load(rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01005226 curr->sched_class->task_tick(rq, curr, 0);
Ingo Molnardd41f592007-07-09 18:51:59 +02005227 spin_unlock(&rq->lock);
5228
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005229 perf_event_task_tick(curr, cpu);
Peter Zijlstrae220d2d2009-05-23 18:28:55 +02005230
Christoph Lametere418e1c2006-12-10 02:20:23 -08005231#ifdef CONFIG_SMP
Ingo Molnardd41f592007-07-09 18:51:59 +02005232 rq->idle_at_tick = idle_cpu(cpu);
5233 trigger_load_balance(rq, cpu);
Christoph Lametere418e1c2006-12-10 02:20:23 -08005234#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235}
5236
Lai Jiangshan132380a2009-04-02 14:18:25 +08005237notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02005238{
5239 if (in_lock_functions(addr)) {
5240 addr = CALLER_ADDR2;
5241 if (in_lock_functions(addr))
5242 addr = CALLER_ADDR3;
5243 }
5244 return addr;
5245}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246
Steven Rostedt7e49fcc2009-01-22 19:01:40 -05005247#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
5248 defined(CONFIG_PREEMPT_TRACER))
5249
Srinivasa Ds43627582008-02-23 15:24:04 -08005250void __kprobes add_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02005252#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253 /*
5254 * Underflow?
5255 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07005256 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5257 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02005258#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259 preempt_count() += val;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02005260#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261 /*
5262 * Spinlock count overflowing soon?
5263 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08005264 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5265 PREEMPT_MASK - 10);
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02005266#endif
5267 if (preempt_count() == val)
5268 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269}
5270EXPORT_SYMBOL(add_preempt_count);
5271
Srinivasa Ds43627582008-02-23 15:24:04 -08005272void __kprobes sub_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02005274#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275 /*
5276 * Underflow?
5277 */
Ingo Molnar01e3eb82009-01-12 13:00:50 +01005278 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07005279 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280 /*
5281 * Is the spinlock portion underflowing?
5282 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07005283 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5284 !(preempt_count() & PREEMPT_MASK)))
5285 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02005286#endif
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07005287
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02005288 if (preempt_count() == val)
5289 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005290 preempt_count() -= val;
5291}
5292EXPORT_SYMBOL(sub_preempt_count);
5293
5294#endif
5295
5296/*
Ingo Molnardd41f592007-07-09 18:51:59 +02005297 * Print scheduling while atomic bug:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298 */
Ingo Molnardd41f592007-07-09 18:51:59 +02005299static noinline void __schedule_bug(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300{
Satyam Sharma838225b2007-10-24 18:23:50 +02005301 struct pt_regs *regs = get_irq_regs();
5302
5303 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5304 prev->comm, prev->pid, preempt_count());
5305
Ingo Molnardd41f592007-07-09 18:51:59 +02005306 debug_show_held_locks(prev);
Arjan van de Vene21f5b12008-05-23 09:05:58 -07005307 print_modules();
Ingo Molnardd41f592007-07-09 18:51:59 +02005308 if (irqs_disabled())
5309 print_irqtrace_events(prev);
Satyam Sharma838225b2007-10-24 18:23:50 +02005310
5311 if (regs)
5312 show_regs(regs);
5313 else
5314 dump_stack();
Ingo Molnardd41f592007-07-09 18:51:59 +02005315}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316
Ingo Molnardd41f592007-07-09 18:51:59 +02005317/*
5318 * Various schedule()-time debugging checks and statistics:
5319 */
5320static inline void schedule_debug(struct task_struct *prev)
5321{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322 /*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005323 * Test if we are atomic. Since do_exit() needs to call into
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324 * schedule() atomically, we ignore that path for now.
5325 * Otherwise, whine if we are scheduling when we should not be.
5326 */
Roel Kluin3f33a7c2008-05-13 23:44:11 +02005327 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
Ingo Molnardd41f592007-07-09 18:51:59 +02005328 __schedule_bug(prev);
5329
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5331
Ingo Molnar2d723762007-10-15 17:00:12 +02005332 schedstat_inc(this_rq(), sched_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02005333#ifdef CONFIG_SCHEDSTATS
5334 if (unlikely(prev->lock_depth >= 0)) {
Ingo Molnar2d723762007-10-15 17:00:12 +02005335 schedstat_inc(this_rq(), bkl_count);
5336 schedstat_inc(prev, sched_info.bkl_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02005337 }
5338#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02005339}
5340
Peter Zijlstraad4b78b2009-09-16 12:31:31 +02005341static void put_prev_task(struct rq *rq, struct task_struct *p)
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01005342{
Peter Zijlstraad4b78b2009-09-16 12:31:31 +02005343 u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01005344
Peter Zijlstraad4b78b2009-09-16 12:31:31 +02005345 update_avg(&p->se.avg_running, runtime);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01005346
Peter Zijlstraad4b78b2009-09-16 12:31:31 +02005347 if (p->state == TASK_RUNNING) {
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01005348 /*
5349 * In order to avoid avg_overlap growing stale when we are
5350 * indeed overlapping and hence not getting put to sleep, grow
5351 * the avg_overlap on preemption.
5352 *
5353 * We use the average preemption runtime because that
5354 * correlates to the amount of cache footprint a task can
5355 * build up.
5356 */
Peter Zijlstraad4b78b2009-09-16 12:31:31 +02005357 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
5358 update_avg(&p->se.avg_overlap, runtime);
5359 } else {
5360 update_avg(&p->se.avg_running, 0);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01005361 }
Peter Zijlstraad4b78b2009-09-16 12:31:31 +02005362 p->sched_class->put_prev_task(rq, p);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01005363}
5364
Ingo Molnardd41f592007-07-09 18:51:59 +02005365/*
5366 * Pick up the highest-prio task:
5367 */
5368static inline struct task_struct *
Wang Chenb67802e2009-03-02 13:55:26 +08005369pick_next_task(struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02005370{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02005371 const struct sched_class *class;
Ingo Molnardd41f592007-07-09 18:51:59 +02005372 struct task_struct *p;
5373
5374 /*
5375 * Optimization: we know that if all tasks are in
5376 * the fair class we can call that function directly:
5377 */
5378 if (likely(rq->nr_running == rq->cfs.nr_running)) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02005379 p = fair_sched_class.pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02005380 if (likely(p))
5381 return p;
5382 }
5383
5384 class = sched_class_highest;
5385 for ( ; ; ) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02005386 p = class->pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02005387 if (p)
5388 return p;
5389 /*
5390 * Will never be NULL as the idle class always
5391 * returns a non-NULL p:
5392 */
5393 class = class->next;
5394 }
5395}
5396
5397/*
5398 * schedule() is the main scheduler function.
5399 */
Peter Zijlstraff743342009-03-13 12:21:26 +01005400asmlinkage void __sched schedule(void)
Ingo Molnardd41f592007-07-09 18:51:59 +02005401{
5402 struct task_struct *prev, *next;
Harvey Harrison67ca7bd2008-02-15 09:56:36 -08005403 unsigned long *switch_count;
Ingo Molnardd41f592007-07-09 18:51:59 +02005404 struct rq *rq;
Peter Zijlstra31656512008-07-18 18:01:23 +02005405 int cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02005406
Peter Zijlstraff743342009-03-13 12:21:26 +01005407need_resched:
5408 preempt_disable();
Ingo Molnardd41f592007-07-09 18:51:59 +02005409 cpu = smp_processor_id();
5410 rq = cpu_rq(cpu);
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07005411 rcu_sched_qs(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005412 prev = rq->curr;
5413 switch_count = &prev->nivcsw;
5414
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415 release_kernel_lock(prev);
5416need_resched_nonpreemptible:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005417
Ingo Molnardd41f592007-07-09 18:51:59 +02005418 schedule_debug(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419
Peter Zijlstra31656512008-07-18 18:01:23 +02005420 if (sched_feat(HRTICK))
Mike Galbraithf333fdc2008-05-12 21:20:55 +02005421 hrtick_clear(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005422
Peter Zijlstra8cd162c2008-10-15 20:37:23 +02005423 spin_lock_irq(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02005424 update_rq_clock(rq);
Ingo Molnar1e819952007-10-15 17:00:13 +02005425 clear_tsk_need_resched(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426
Ingo Molnardd41f592007-07-09 18:51:59 +02005427 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
Oleg Nesterov16882c12008-06-08 21:20:41 +04005428 if (unlikely(signal_pending_state(prev->state, prev)))
Ingo Molnardd41f592007-07-09 18:51:59 +02005429 prev->state = TASK_RUNNING;
Oleg Nesterov16882c12008-06-08 21:20:41 +04005430 else
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005431 deactivate_task(rq, prev, 1);
Ingo Molnardd41f592007-07-09 18:51:59 +02005432 switch_count = &prev->nvcsw;
5433 }
5434
Gregory Haskins3f029d32009-07-29 11:08:47 -04005435 pre_schedule(rq, prev);
Steven Rostedtf65eda42008-01-25 21:08:07 +01005436
Ingo Molnardd41f592007-07-09 18:51:59 +02005437 if (unlikely(!rq->nr_running))
5438 idle_balance(cpu, rq);
5439
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01005440 put_prev_task(rq, prev);
Wang Chenb67802e2009-03-02 13:55:26 +08005441 next = pick_next_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443 if (likely(prev != next)) {
David Simner673a90a2008-04-29 10:08:59 +01005444 sched_info_switch(prev, next);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005445 perf_event_task_sched_out(prev, next, cpu);
David Simner673a90a2008-04-29 10:08:59 +01005446
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 rq->nr_switches++;
5448 rq->curr = next;
5449 ++*switch_count;
5450
Ingo Molnardd41f592007-07-09 18:51:59 +02005451 context_switch(rq, prev, next); /* unlocks the rq */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005452 /*
5453 * the context switch might have flipped the stack from under
5454 * us, hence refresh the local variables.
5455 */
5456 cpu = smp_processor_id();
5457 rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458 } else
5459 spin_unlock_irq(&rq->lock);
5460
Gregory Haskins3f029d32009-07-29 11:08:47 -04005461 post_schedule(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005463 if (unlikely(reacquire_kernel_lock(current) < 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005464 goto need_resched_nonpreemptible;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005465
Linus Torvalds1da177e2005-04-16 15:20:36 -07005466 preempt_enable_no_resched();
Peter Zijlstraff743342009-03-13 12:21:26 +01005467 if (need_resched())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005468 goto need_resched;
5469}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470EXPORT_SYMBOL(schedule);
5471
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01005472#ifdef CONFIG_SMP
5473/*
5474 * Look out! "owner" is an entirely speculative pointer
5475 * access and not reliable.
5476 */
5477int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
5478{
5479 unsigned int cpu;
5480 struct rq *rq;
5481
5482 if (!sched_feat(OWNER_SPIN))
5483 return 0;
5484
5485#ifdef CONFIG_DEBUG_PAGEALLOC
5486 /*
5487 * Need to access the cpu field knowing that
5488 * DEBUG_PAGEALLOC could have unmapped it if
5489 * the mutex owner just released it and exited.
5490 */
5491 if (probe_kernel_address(&owner->cpu, cpu))
5492 goto out;
5493#else
5494 cpu = owner->cpu;
5495#endif
5496
5497 /*
5498 * Even if the access succeeded (likely case),
5499 * the cpu field may no longer be valid.
5500 */
5501 if (cpu >= nr_cpumask_bits)
5502 goto out;
5503
5504 /*
5505 * We need to validate that we can do a
5506 * get_cpu() and that we have the percpu area.
5507 */
5508 if (!cpu_online(cpu))
5509 goto out;
5510
5511 rq = cpu_rq(cpu);
5512
5513 for (;;) {
5514 /*
5515 * Owner changed, break to re-assess state.
5516 */
5517 if (lock->owner != owner)
5518 break;
5519
5520 /*
5521 * Is that owner really running on that cpu?
5522 */
5523 if (task_thread_info(rq->curr) != owner || need_resched())
5524 return 0;
5525
5526 cpu_relax();
5527 }
5528out:
5529 return 1;
5530}
5531#endif
5532
Linus Torvalds1da177e2005-04-16 15:20:36 -07005533#ifdef CONFIG_PREEMPT
5534/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005535 * this is the entry point to schedule() from in-kernel preemption
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005536 * off of preempt_enable. Kernel preemptions off return from interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07005537 * occur there and call schedule directly.
5538 */
5539asmlinkage void __sched preempt_schedule(void)
5540{
5541 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01005542
Linus Torvalds1da177e2005-04-16 15:20:36 -07005543 /*
5544 * If there is a non-zero preempt_count or interrupts are disabled,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005545 * we do not want to preempt the current task. Just return..
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546 */
Nick Pigginbeed33a2006-10-11 01:21:52 -07005547 if (likely(ti->preempt_count || irqs_disabled()))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005548 return;
5549
Andi Kleen3a5c3592007-10-15 17:00:14 +02005550 do {
5551 add_preempt_count(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02005552 schedule();
Andi Kleen3a5c3592007-10-15 17:00:14 +02005553 sub_preempt_count(PREEMPT_ACTIVE);
5554
5555 /*
5556 * Check again in case we missed a preemption opportunity
5557 * between schedule and now.
5558 */
5559 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08005560 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07005561}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005562EXPORT_SYMBOL(preempt_schedule);
5563
5564/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005565 * this is the entry point to schedule() from kernel preemption
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566 * off of irq context.
5567 * Note, that this is called and return with irqs disabled. This will
5568 * protect us against recursive calling from irq.
5569 */
5570asmlinkage void __sched preempt_schedule_irq(void)
5571{
5572 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01005573
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005574 /* Catch callers which need to be fixed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005575 BUG_ON(ti->preempt_count || !irqs_disabled());
5576
Andi Kleen3a5c3592007-10-15 17:00:14 +02005577 do {
5578 add_preempt_count(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02005579 local_irq_enable();
5580 schedule();
5581 local_irq_disable();
Andi Kleen3a5c3592007-10-15 17:00:14 +02005582 sub_preempt_count(PREEMPT_ACTIVE);
5583
5584 /*
5585 * Check again in case we missed a preemption opportunity
5586 * between schedule and now.
5587 */
5588 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08005589 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07005590}
5591
5592#endif /* CONFIG_PREEMPT */
5593
Peter Zijlstra63859d42009-09-15 19:14:42 +02005594int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005595 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005596{
Peter Zijlstra63859d42009-09-15 19:14:42 +02005597 return try_to_wake_up(curr->private, mode, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005598}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005599EXPORT_SYMBOL(default_wake_function);
5600
5601/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005602 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
5603 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
Linus Torvalds1da177e2005-04-16 15:20:36 -07005604 * number) then we wake all the non-exclusive tasks and one exclusive task.
5605 *
5606 * There are circumstances in which we can try to wake a task which has already
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005607 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
Linus Torvalds1da177e2005-04-16 15:20:36 -07005608 * zero in this (rare) case, and we handle it by continuing to scan the queue.
5609 */
Johannes Weiner78ddb082009-04-14 16:53:05 +02005610static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
Peter Zijlstra63859d42009-09-15 19:14:42 +02005611 int nr_exclusive, int wake_flags, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005612{
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02005613 wait_queue_t *curr, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005614
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02005615 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07005616 unsigned flags = curr->flags;
5617
Peter Zijlstra63859d42009-09-15 19:14:42 +02005618 if (curr->func(curr, mode, wake_flags, key) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07005619 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005620 break;
5621 }
5622}
5623
5624/**
5625 * __wake_up - wake up threads blocked on a waitqueue.
5626 * @q: the waitqueue
5627 * @mode: which threads
5628 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Martin Waitz67be2dd2005-05-01 08:59:26 -07005629 * @key: is directly passed to the wakeup function
David Howells50fa6102009-04-28 15:01:38 +01005630 *
5631 * It may be assumed that this function implies a write memory barrier before
5632 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08005634void __wake_up(wait_queue_head_t *q, unsigned int mode,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005635 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005636{
5637 unsigned long flags;
5638
5639 spin_lock_irqsave(&q->lock, flags);
5640 __wake_up_common(q, mode, nr_exclusive, 0, key);
5641 spin_unlock_irqrestore(&q->lock, flags);
5642}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643EXPORT_SYMBOL(__wake_up);
5644
5645/*
5646 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
5647 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08005648void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005649{
5650 __wake_up_common(q, mode, 1, 0, NULL);
5651}
5652
Davide Libenzi4ede8162009-03-31 15:24:20 -07005653void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
5654{
5655 __wake_up_common(q, mode, 1, 0, key);
5656}
5657
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658/**
Davide Libenzi4ede8162009-03-31 15:24:20 -07005659 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005660 * @q: the waitqueue
5661 * @mode: which threads
5662 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Davide Libenzi4ede8162009-03-31 15:24:20 -07005663 * @key: opaque value to be passed to wakeup targets
Linus Torvalds1da177e2005-04-16 15:20:36 -07005664 *
5665 * The sync wakeup differs that the waker knows that it will schedule
5666 * away soon, so while the target thread will be woken up, it will not
5667 * be migrated to another CPU - ie. the two threads are 'synchronized'
5668 * with each other. This can prevent needless bouncing between CPUs.
5669 *
5670 * On UP it can prevent extra preemption.
David Howells50fa6102009-04-28 15:01:38 +01005671 *
5672 * It may be assumed that this function implies a write memory barrier before
5673 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005674 */
Davide Libenzi4ede8162009-03-31 15:24:20 -07005675void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
5676 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005677{
5678 unsigned long flags;
Peter Zijlstra7d478722009-09-14 19:55:44 +02005679 int wake_flags = WF_SYNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680
5681 if (unlikely(!q))
5682 return;
5683
5684 if (unlikely(!nr_exclusive))
Peter Zijlstra7d478722009-09-14 19:55:44 +02005685 wake_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005686
5687 spin_lock_irqsave(&q->lock, flags);
Peter Zijlstra7d478722009-09-14 19:55:44 +02005688 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005689 spin_unlock_irqrestore(&q->lock, flags);
5690}
Davide Libenzi4ede8162009-03-31 15:24:20 -07005691EXPORT_SYMBOL_GPL(__wake_up_sync_key);
5692
5693/*
5694 * __wake_up_sync - see __wake_up_sync_key()
5695 */
5696void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
5697{
5698 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
5699}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005700EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
5701
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005702/**
5703 * complete: - signals a single thread waiting on this completion
5704 * @x: holds the state of this particular completion
5705 *
5706 * This will wake up a single thread waiting on this completion. Threads will be
5707 * awakened in the same order in which they were queued.
5708 *
5709 * See also complete_all(), wait_for_completion() and related routines.
David Howells50fa6102009-04-28 15:01:38 +01005710 *
5711 * It may be assumed that this function implies a write memory barrier before
5712 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005713 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02005714void complete(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715{
5716 unsigned long flags;
5717
5718 spin_lock_irqsave(&x->wait.lock, flags);
5719 x->done++;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05005720 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721 spin_unlock_irqrestore(&x->wait.lock, flags);
5722}
5723EXPORT_SYMBOL(complete);
5724
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005725/**
5726 * complete_all: - signals all threads waiting on this completion
5727 * @x: holds the state of this particular completion
5728 *
5729 * This will wake up all threads waiting on this particular completion event.
David Howells50fa6102009-04-28 15:01:38 +01005730 *
5731 * It may be assumed that this function implies a write memory barrier before
5732 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005733 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02005734void complete_all(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005735{
5736 unsigned long flags;
5737
5738 spin_lock_irqsave(&x->wait.lock, flags);
5739 x->done += UINT_MAX/2;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05005740 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005741 spin_unlock_irqrestore(&x->wait.lock, flags);
5742}
5743EXPORT_SYMBOL(complete_all);
5744
Andi Kleen8cbbe862007-10-15 17:00:14 +02005745static inline long __sched
5746do_wait_for_common(struct completion *x, long timeout, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005747{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005748 if (!x->done) {
5749 DECLARE_WAITQUEUE(wait, current);
5750
5751 wait.flags |= WQ_FLAG_EXCLUSIVE;
5752 __add_wait_queue_tail(&x->wait, &wait);
5753 do {
Oleg Nesterov94d3d822008-08-20 16:54:41 -07005754 if (signal_pending_state(state, current)) {
Oleg Nesterovea71a542008-06-20 18:32:20 +04005755 timeout = -ERESTARTSYS;
5756 break;
Andi Kleen8cbbe862007-10-15 17:00:14 +02005757 }
5758 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005759 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02005760 timeout = schedule_timeout(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005761 spin_lock_irq(&x->wait.lock);
Oleg Nesterovea71a542008-06-20 18:32:20 +04005762 } while (!x->done && timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005763 __remove_wait_queue(&x->wait, &wait);
Oleg Nesterovea71a542008-06-20 18:32:20 +04005764 if (!x->done)
5765 return timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005766 }
5767 x->done--;
Oleg Nesterovea71a542008-06-20 18:32:20 +04005768 return timeout ?: 1;
Andi Kleen8cbbe862007-10-15 17:00:14 +02005769}
5770
5771static long __sched
5772wait_for_common(struct completion *x, long timeout, int state)
5773{
5774 might_sleep();
5775
5776 spin_lock_irq(&x->wait.lock);
5777 timeout = do_wait_for_common(x, timeout, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005778 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02005779 return timeout;
5780}
5781
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005782/**
5783 * wait_for_completion: - waits for completion of a task
5784 * @x: holds the state of this particular completion
5785 *
5786 * This waits to be signaled for completion of a specific task. It is NOT
5787 * interruptible and there is no timeout.
5788 *
5789 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
5790 * and interrupt capability. Also see complete().
5791 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02005792void __sched wait_for_completion(struct completion *x)
Andi Kleen8cbbe862007-10-15 17:00:14 +02005793{
5794 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005795}
5796EXPORT_SYMBOL(wait_for_completion);
5797
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005798/**
5799 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
5800 * @x: holds the state of this particular completion
5801 * @timeout: timeout value in jiffies
5802 *
5803 * This waits for either a completion of a specific task to be signaled or for a
5804 * specified timeout to expire. The timeout is in jiffies. It is not
5805 * interruptible.
5806 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02005807unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07005808wait_for_completion_timeout(struct completion *x, unsigned long timeout)
5809{
Andi Kleen8cbbe862007-10-15 17:00:14 +02005810 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811}
5812EXPORT_SYMBOL(wait_for_completion_timeout);
5813
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005814/**
5815 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
5816 * @x: holds the state of this particular completion
5817 *
5818 * This waits for completion of a specific task to be signaled. It is
5819 * interruptible.
5820 */
Andi Kleen8cbbe862007-10-15 17:00:14 +02005821int __sched wait_for_completion_interruptible(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005822{
Andi Kleen51e97992007-10-18 21:32:55 +02005823 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
5824 if (t == -ERESTARTSYS)
5825 return t;
5826 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005827}
5828EXPORT_SYMBOL(wait_for_completion_interruptible);
5829
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005830/**
5831 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
5832 * @x: holds the state of this particular completion
5833 * @timeout: timeout value in jiffies
5834 *
5835 * This waits for either a completion of a specific task to be signaled or for a
5836 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
5837 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02005838unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07005839wait_for_completion_interruptible_timeout(struct completion *x,
5840 unsigned long timeout)
5841{
Andi Kleen8cbbe862007-10-15 17:00:14 +02005842 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005843}
5844EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
5845
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02005846/**
5847 * wait_for_completion_killable: - waits for completion of a task (killable)
5848 * @x: holds the state of this particular completion
5849 *
5850 * This waits to be signaled for completion of a specific task. It can be
5851 * interrupted by a kill signal.
5852 */
Matthew Wilcox009e5772007-12-06 12:29:54 -05005853int __sched wait_for_completion_killable(struct completion *x)
5854{
5855 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
5856 if (t == -ERESTARTSYS)
5857 return t;
5858 return 0;
5859}
5860EXPORT_SYMBOL(wait_for_completion_killable);
5861
Dave Chinnerbe4de352008-08-15 00:40:44 -07005862/**
5863 * try_wait_for_completion - try to decrement a completion without blocking
5864 * @x: completion structure
5865 *
5866 * Returns: 0 if a decrement cannot be done without blocking
5867 * 1 if a decrement succeeded.
5868 *
5869 * If a completion is being used as a counting completion,
5870 * attempt to decrement the counter without blocking. This
5871 * enables us to avoid waiting if the resource the completion
5872 * is protecting is not available.
5873 */
5874bool try_wait_for_completion(struct completion *x)
5875{
5876 int ret = 1;
5877
5878 spin_lock_irq(&x->wait.lock);
5879 if (!x->done)
5880 ret = 0;
5881 else
5882 x->done--;
5883 spin_unlock_irq(&x->wait.lock);
5884 return ret;
5885}
5886EXPORT_SYMBOL(try_wait_for_completion);
5887
5888/**
5889 * completion_done - Test to see if a completion has any waiters
5890 * @x: completion structure
5891 *
5892 * Returns: 0 if there are waiters (wait_for_completion() in progress)
5893 * 1 if there are no waiters.
5894 *
5895 */
5896bool completion_done(struct completion *x)
5897{
5898 int ret = 1;
5899
5900 spin_lock_irq(&x->wait.lock);
5901 if (!x->done)
5902 ret = 0;
5903 spin_unlock_irq(&x->wait.lock);
5904 return ret;
5905}
5906EXPORT_SYMBOL(completion_done);
5907
Andi Kleen8cbbe862007-10-15 17:00:14 +02005908static long __sched
5909sleep_on_common(wait_queue_head_t *q, int state, long timeout)
Ingo Molnar0fec1712007-07-09 18:52:01 +02005910{
5911 unsigned long flags;
5912 wait_queue_t wait;
5913
5914 init_waitqueue_entry(&wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005915
Andi Kleen8cbbe862007-10-15 17:00:14 +02005916 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005917
Andi Kleen8cbbe862007-10-15 17:00:14 +02005918 spin_lock_irqsave(&q->lock, flags);
5919 __add_wait_queue(q, &wait);
5920 spin_unlock(&q->lock);
5921 timeout = schedule_timeout(timeout);
5922 spin_lock_irq(&q->lock);
5923 __remove_wait_queue(q, &wait);
5924 spin_unlock_irqrestore(&q->lock, flags);
5925
5926 return timeout;
5927}
5928
5929void __sched interruptible_sleep_on(wait_queue_head_t *q)
5930{
5931 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005932}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005933EXPORT_SYMBOL(interruptible_sleep_on);
5934
Ingo Molnar0fec1712007-07-09 18:52:01 +02005935long __sched
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005936interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005937{
Andi Kleen8cbbe862007-10-15 17:00:14 +02005938 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005939}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005940EXPORT_SYMBOL(interruptible_sleep_on_timeout);
5941
Ingo Molnar0fec1712007-07-09 18:52:01 +02005942void __sched sleep_on(wait_queue_head_t *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005943{
Andi Kleen8cbbe862007-10-15 17:00:14 +02005944 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005946EXPORT_SYMBOL(sleep_on);
5947
Ingo Molnar0fec1712007-07-09 18:52:01 +02005948long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949{
Andi Kleen8cbbe862007-10-15 17:00:14 +02005950 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005951}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005952EXPORT_SYMBOL(sleep_on_timeout);
5953
Ingo Molnarb29739f2006-06-27 02:54:51 -07005954#ifdef CONFIG_RT_MUTEXES
5955
5956/*
5957 * rt_mutex_setprio - set the current priority of a task
5958 * @p: task
5959 * @prio: prio value (kernel-internal form)
5960 *
5961 * This function changes the 'effective' priority of a task. It does
5962 * not touch ->normal_prio like __setscheduler().
5963 *
5964 * Used by the rt_mutex code to implement priority inheritance logic.
5965 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07005966void rt_mutex_setprio(struct task_struct *p, int prio)
Ingo Molnarb29739f2006-06-27 02:54:51 -07005967{
5968 unsigned long flags;
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02005969 int oldprio, on_rq, running;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005970 struct rq *rq;
Steven Rostedtcb469842008-01-25 21:08:22 +01005971 const struct sched_class *prev_class = p->sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07005972
5973 BUG_ON(prio < 0 || prio > MAX_PRIO);
5974
5975 rq = task_rq_lock(p, &flags);
Ingo Molnara8e504d2007-08-09 11:16:47 +02005976 update_rq_clock(rq);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005977
Andrew Mortond5f9f942007-05-08 20:27:06 -07005978 oldprio = p->prio;
Ingo Molnardd41f592007-07-09 18:51:59 +02005979 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01005980 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005981 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02005982 dequeue_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005983 if (running)
5984 p->sched_class->put_prev_task(rq, p);
Ingo Molnardd41f592007-07-09 18:51:59 +02005985
5986 if (rt_prio(prio))
5987 p->sched_class = &rt_sched_class;
5988 else
5989 p->sched_class = &fair_sched_class;
5990
Ingo Molnarb29739f2006-06-27 02:54:51 -07005991 p->prio = prio;
5992
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005993 if (running)
5994 p->sched_class->set_curr_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02005995 if (on_rq) {
Ingo Molnar8159f872007-08-09 11:16:49 +02005996 enqueue_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005997
5998 check_class_changed(rq, p, prev_class, oldprio, running);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005999 }
6000 task_rq_unlock(rq, &flags);
6001}
6002
6003#endif
6004
Ingo Molnar36c8b582006-07-03 00:25:41 -07006005void set_user_nice(struct task_struct *p, long nice)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006006{
Ingo Molnardd41f592007-07-09 18:51:59 +02006007 int old_prio, delta, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006008 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07006009 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006010
6011 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
6012 return;
6013 /*
6014 * We have to be careful, if called from sys_setpriority(),
6015 * the task might be in the middle of scheduling on another CPU.
6016 */
6017 rq = task_rq_lock(p, &flags);
Ingo Molnara8e504d2007-08-09 11:16:47 +02006018 update_rq_clock(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006019 /*
6020 * The RT priorities are set via sched_setscheduler(), but we still
6021 * allow the 'normal' nice value to be set - but as expected
6022 * it wont have any effect on scheduling until the task is
Ingo Molnardd41f592007-07-09 18:51:59 +02006023 * SCHED_FIFO/SCHED_RR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006024 */
Ingo Molnare05606d2007-07-09 18:51:59 +02006025 if (task_has_rt_policy(p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006026 p->static_prio = NICE_TO_PRIO(nice);
6027 goto out_unlock;
6028 }
Ingo Molnardd41f592007-07-09 18:51:59 +02006029 on_rq = p->se.on_rq;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02006030 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02006031 dequeue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006032
Linus Torvalds1da177e2005-04-16 15:20:36 -07006033 p->static_prio = NICE_TO_PRIO(nice);
Peter Williams2dd73a42006-06-27 02:54:34 -07006034 set_load_weight(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07006035 old_prio = p->prio;
6036 p->prio = effective_prio(p);
6037 delta = p->prio - old_prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006038
Ingo Molnardd41f592007-07-09 18:51:59 +02006039 if (on_rq) {
Ingo Molnar8159f872007-08-09 11:16:49 +02006040 enqueue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006041 /*
Andrew Mortond5f9f942007-05-08 20:27:06 -07006042 * If the task increased its priority or is running and
6043 * lowered its priority, then reschedule its CPU:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006044 */
Andrew Mortond5f9f942007-05-08 20:27:06 -07006045 if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046 resched_task(rq->curr);
6047 }
6048out_unlock:
6049 task_rq_unlock(rq, &flags);
6050}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006051EXPORT_SYMBOL(set_user_nice);
6052
Matt Mackalle43379f2005-05-01 08:59:00 -07006053/*
6054 * can_nice - check if a task can reduce its nice value
6055 * @p: task
6056 * @nice: nice value
6057 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07006058int can_nice(const struct task_struct *p, const int nice)
Matt Mackalle43379f2005-05-01 08:59:00 -07006059{
Matt Mackall024f4742005-08-18 11:24:19 -07006060 /* convert nice value [19,-20] to rlimit style value [1,40] */
6061 int nice_rlim = 20 - nice;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006062
Matt Mackalle43379f2005-05-01 08:59:00 -07006063 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
6064 capable(CAP_SYS_NICE));
6065}
6066
Linus Torvalds1da177e2005-04-16 15:20:36 -07006067#ifdef __ARCH_WANT_SYS_NICE
6068
6069/*
6070 * sys_nice - change the priority of the current process.
6071 * @increment: priority increment
6072 *
6073 * sys_setpriority is a more generic, but much slower function that
6074 * does similar things.
6075 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006076SYSCALL_DEFINE1(nice, int, increment)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006077{
Ingo Molnar48f24c42006-07-03 00:25:40 -07006078 long nice, retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006079
6080 /*
6081 * Setpriority might change our priority at the same moment.
6082 * We don't have to worry. Conceptually one call occurs first
6083 * and we have a single winner.
6084 */
Matt Mackalle43379f2005-05-01 08:59:00 -07006085 if (increment < -40)
6086 increment = -40;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006087 if (increment > 40)
6088 increment = 40;
6089
Américo Wang2b8f8362009-02-16 18:54:21 +08006090 nice = TASK_NICE(current) + increment;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006091 if (nice < -20)
6092 nice = -20;
6093 if (nice > 19)
6094 nice = 19;
6095
Matt Mackalle43379f2005-05-01 08:59:00 -07006096 if (increment < 0 && !can_nice(current, nice))
6097 return -EPERM;
6098
Linus Torvalds1da177e2005-04-16 15:20:36 -07006099 retval = security_task_setnice(current, nice);
6100 if (retval)
6101 return retval;
6102
6103 set_user_nice(current, nice);
6104 return 0;
6105}
6106
6107#endif
6108
6109/**
6110 * task_prio - return the priority value of a given task.
6111 * @p: the task in question.
6112 *
6113 * This is the priority value as seen by users in /proc.
6114 * RT tasks are offset by -200. Normal tasks are centered
6115 * around 0, value goes from -16 to +15.
6116 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07006117int task_prio(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006118{
6119 return p->prio - MAX_RT_PRIO;
6120}
6121
6122/**
6123 * task_nice - return the nice value of a given task.
6124 * @p: the task in question.
6125 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07006126int task_nice(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006127{
6128 return TASK_NICE(p);
6129}
Pavel Roskin150d8be2008-03-05 16:56:37 -05006130EXPORT_SYMBOL(task_nice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006131
6132/**
6133 * idle_cpu - is a given cpu idle currently?
6134 * @cpu: the processor in question.
6135 */
6136int idle_cpu(int cpu)
6137{
6138 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
6139}
6140
Linus Torvalds1da177e2005-04-16 15:20:36 -07006141/**
6142 * idle_task - return the idle task for a given cpu.
6143 * @cpu: the processor in question.
6144 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07006145struct task_struct *idle_task(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006146{
6147 return cpu_rq(cpu)->idle;
6148}
6149
6150/**
6151 * find_process_by_pid - find a process with a matching PID value.
6152 * @pid: the pid in question.
6153 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02006154static struct task_struct *find_process_by_pid(pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006155{
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07006156 return pid ? find_task_by_vpid(pid) : current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006157}
6158
6159/* Actually do priority change: must hold rq lock. */
Ingo Molnardd41f592007-07-09 18:51:59 +02006160static void
6161__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162{
Ingo Molnardd41f592007-07-09 18:51:59 +02006163 BUG_ON(p->se.on_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006164
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165 p->policy = policy;
6166 p->rt_priority = prio;
Ingo Molnarb29739f2006-06-27 02:54:51 -07006167 p->normal_prio = normal_prio(p);
6168 /* we are holding p->pi_lock already */
6169 p->prio = rt_mutex_getprio(p);
Peter Zijlstraffd44db2009-11-10 20:12:01 +01006170 if (rt_prio(p->prio))
6171 p->sched_class = &rt_sched_class;
6172 else
6173 p->sched_class = &fair_sched_class;
Peter Williams2dd73a42006-06-27 02:54:34 -07006174 set_load_weight(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175}
6176
David Howellsc69e8d92008-11-14 10:39:19 +11006177/*
6178 * check the target process has a UID that matches the current process's
6179 */
6180static bool check_same_owner(struct task_struct *p)
6181{
6182 const struct cred *cred = current_cred(), *pcred;
6183 bool match;
6184
6185 rcu_read_lock();
6186 pcred = __task_cred(p);
6187 match = (cred->euid == pcred->euid ||
6188 cred->euid == pcred->uid);
6189 rcu_read_unlock();
6190 return match;
6191}
6192
Rusty Russell961ccdd2008-06-23 13:55:38 +10006193static int __sched_setscheduler(struct task_struct *p, int policy,
6194 struct sched_param *param, bool user)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006195{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006196 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006197 unsigned long flags;
Steven Rostedtcb469842008-01-25 21:08:22 +01006198 const struct sched_class *prev_class = p->sched_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07006199 struct rq *rq;
Lennart Poetteringca94c442009-06-15 17:17:47 +02006200 int reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006201
Steven Rostedt66e53932006-06-27 02:54:44 -07006202 /* may grab non-irq protected spin_locks */
6203 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204recheck:
6205 /* double check policy once rq lock held */
Lennart Poetteringca94c442009-06-15 17:17:47 +02006206 if (policy < 0) {
6207 reset_on_fork = p->sched_reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006208 policy = oldpolicy = p->policy;
Lennart Poetteringca94c442009-06-15 17:17:47 +02006209 } else {
6210 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
6211 policy &= ~SCHED_RESET_ON_FORK;
6212
6213 if (policy != SCHED_FIFO && policy != SCHED_RR &&
6214 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
6215 policy != SCHED_IDLE)
6216 return -EINVAL;
6217 }
6218
Linus Torvalds1da177e2005-04-16 15:20:36 -07006219 /*
6220 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02006221 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
6222 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006223 */
6224 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07006225 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04006226 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006227 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02006228 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229 return -EINVAL;
6230
Olivier Croquette37e4ab32005-06-25 14:57:32 -07006231 /*
6232 * Allow unprivileged RT tasks to decrease priority:
6233 */
Rusty Russell961ccdd2008-06-23 13:55:38 +10006234 if (user && !capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02006235 if (rt_policy(policy)) {
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07006236 unsigned long rlim_rtprio;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07006237
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07006238 if (!lock_task_sighand(p, &flags))
6239 return -ESRCH;
6240 rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
6241 unlock_task_sighand(p, &flags);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07006242
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07006243 /* can't set/change the rt policy */
6244 if (policy != p->policy && !rlim_rtprio)
6245 return -EPERM;
6246
6247 /* can't increase priority */
6248 if (param->sched_priority > p->rt_priority &&
6249 param->sched_priority > rlim_rtprio)
6250 return -EPERM;
6251 }
Ingo Molnardd41f592007-07-09 18:51:59 +02006252 /*
6253 * Like positive nice levels, dont allow tasks to
6254 * move out of SCHED_IDLE either:
6255 */
6256 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
6257 return -EPERM;
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07006258
Olivier Croquette37e4ab32005-06-25 14:57:32 -07006259 /* can't change other user's priorities */
David Howellsc69e8d92008-11-14 10:39:19 +11006260 if (!check_same_owner(p))
Olivier Croquette37e4ab32005-06-25 14:57:32 -07006261 return -EPERM;
Lennart Poetteringca94c442009-06-15 17:17:47 +02006262
6263 /* Normal users shall not reset the sched_reset_on_fork flag */
6264 if (p->sched_reset_on_fork && !reset_on_fork)
6265 return -EPERM;
Olivier Croquette37e4ab32005-06-25 14:57:32 -07006266 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006267
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07006268 if (user) {
Peter Zijlstrab68aa232008-02-13 15:45:40 +01006269#ifdef CONFIG_RT_GROUP_SCHED
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07006270 /*
6271 * Do not allow realtime tasks into groups that have no runtime
6272 * assigned.
6273 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02006274 if (rt_bandwidth_enabled() && rt_policy(policy) &&
6275 task_group(p)->rt_bandwidth.rt_runtime == 0)
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07006276 return -EPERM;
Peter Zijlstrab68aa232008-02-13 15:45:40 +01006277#endif
6278
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07006279 retval = security_task_setscheduler(p, policy, param);
6280 if (retval)
6281 return retval;
6282 }
6283
Linus Torvalds1da177e2005-04-16 15:20:36 -07006284 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07006285 * make sure no PI-waiters arrive (or leave) while we are
6286 * changing the priority of the task:
6287 */
6288 spin_lock_irqsave(&p->pi_lock, flags);
6289 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07006290 * To be able to change p->policy safely, the apropriate
6291 * runqueue lock must be held.
6292 */
Ingo Molnarb29739f2006-06-27 02:54:51 -07006293 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006294 /* recheck policy now with rq lock held */
6295 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6296 policy = oldpolicy = -1;
Ingo Molnarb29739f2006-06-27 02:54:51 -07006297 __task_rq_unlock(rq);
6298 spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006299 goto recheck;
6300 }
Ingo Molnar2daa3572007-08-09 11:16:51 +02006301 update_rq_clock(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02006302 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01006303 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07006304 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02006305 deactivate_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07006306 if (running)
6307 p->sched_class->put_prev_task(rq, p);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02006308
Lennart Poetteringca94c442009-06-15 17:17:47 +02006309 p->sched_reset_on_fork = reset_on_fork;
6310
Linus Torvalds1da177e2005-04-16 15:20:36 -07006311 oldprio = p->prio;
Ingo Molnardd41f592007-07-09 18:51:59 +02006312 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02006313
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07006314 if (running)
6315 p->sched_class->set_curr_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02006316 if (on_rq) {
6317 activate_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01006318
6319 check_class_changed(rq, p, prev_class, oldprio, running);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006320 }
Ingo Molnarb29739f2006-06-27 02:54:51 -07006321 __task_rq_unlock(rq);
6322 spin_unlock_irqrestore(&p->pi_lock, flags);
6323
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07006324 rt_mutex_adjust_pi(p);
6325
Linus Torvalds1da177e2005-04-16 15:20:36 -07006326 return 0;
6327}
Rusty Russell961ccdd2008-06-23 13:55:38 +10006328
6329/**
6330 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
6331 * @p: the task in question.
6332 * @policy: new policy.
6333 * @param: structure containing the new RT priority.
6334 *
6335 * NOTE that the task may be already dead.
6336 */
6337int sched_setscheduler(struct task_struct *p, int policy,
6338 struct sched_param *param)
6339{
6340 return __sched_setscheduler(p, policy, param, true);
6341}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342EXPORT_SYMBOL_GPL(sched_setscheduler);
6343
Rusty Russell961ccdd2008-06-23 13:55:38 +10006344/**
6345 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
6346 * @p: the task in question.
6347 * @policy: new policy.
6348 * @param: structure containing the new RT priority.
6349 *
6350 * Just like sched_setscheduler, only don't bother checking if the
6351 * current context has permission. For example, this is needed in
6352 * stop_machine(): we create temporary high priority worker threads,
6353 * but our caller might not have that capability.
6354 */
6355int sched_setscheduler_nocheck(struct task_struct *p, int policy,
6356 struct sched_param *param)
6357{
6358 return __sched_setscheduler(p, policy, param, false);
6359}
6360
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07006361static int
6362do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006364 struct sched_param lparam;
6365 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07006366 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006367
6368 if (!param || pid < 0)
6369 return -EINVAL;
6370 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
6371 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07006372
6373 rcu_read_lock();
6374 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006375 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07006376 if (p != NULL)
6377 retval = sched_setscheduler(p, policy, &lparam);
6378 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07006379
Linus Torvalds1da177e2005-04-16 15:20:36 -07006380 return retval;
6381}
6382
6383/**
6384 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
6385 * @pid: the pid in question.
6386 * @policy: new policy.
6387 * @param: structure containing the new RT priority.
6388 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006389SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
6390 struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006391{
Jason Baronc21761f2006-01-18 17:43:03 -08006392 /* negative values for policy are not valid */
6393 if (policy < 0)
6394 return -EINVAL;
6395
Linus Torvalds1da177e2005-04-16 15:20:36 -07006396 return do_sched_setscheduler(pid, policy, param);
6397}
6398
6399/**
6400 * sys_sched_setparam - set/change the RT priority of a thread
6401 * @pid: the pid in question.
6402 * @param: structure containing the new RT priority.
6403 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006404SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006405{
6406 return do_sched_setscheduler(pid, -1, param);
6407}
6408
6409/**
6410 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
6411 * @pid: the pid in question.
6412 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006413SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006414{
Ingo Molnar36c8b582006-07-03 00:25:41 -07006415 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02006416 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006417
6418 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02006419 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006420
6421 retval = -ESRCH;
6422 read_lock(&tasklist_lock);
6423 p = find_process_by_pid(pid);
6424 if (p) {
6425 retval = security_task_getscheduler(p);
6426 if (!retval)
Lennart Poetteringca94c442009-06-15 17:17:47 +02006427 retval = p->policy
6428 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006429 }
6430 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006431 return retval;
6432}
6433
6434/**
Lennart Poetteringca94c442009-06-15 17:17:47 +02006435 * sys_sched_getparam - get the RT priority of a thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07006436 * @pid: the pid in question.
6437 * @param: structure containing the RT priority.
6438 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006439SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006440{
6441 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07006442 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02006443 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006444
6445 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02006446 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006447
6448 read_lock(&tasklist_lock);
6449 p = find_process_by_pid(pid);
6450 retval = -ESRCH;
6451 if (!p)
6452 goto out_unlock;
6453
6454 retval = security_task_getscheduler(p);
6455 if (retval)
6456 goto out_unlock;
6457
6458 lp.sched_priority = p->rt_priority;
6459 read_unlock(&tasklist_lock);
6460
6461 /*
6462 * This one might sleep, we cannot do it with a spinlock held ...
6463 */
6464 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
6465
Linus Torvalds1da177e2005-04-16 15:20:36 -07006466 return retval;
6467
6468out_unlock:
6469 read_unlock(&tasklist_lock);
6470 return retval;
6471}
6472
Rusty Russell96f874e2008-11-25 02:35:14 +10306473long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006474{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306475 cpumask_var_t cpus_allowed, new_mask;
Ingo Molnar36c8b582006-07-03 00:25:41 -07006476 struct task_struct *p;
6477 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006478
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006479 get_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006480 read_lock(&tasklist_lock);
6481
6482 p = find_process_by_pid(pid);
6483 if (!p) {
6484 read_unlock(&tasklist_lock);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006485 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006486 return -ESRCH;
6487 }
6488
6489 /*
6490 * It is not safe to call set_cpus_allowed with the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006491 * tasklist_lock held. We will bump the task_struct's
Linus Torvalds1da177e2005-04-16 15:20:36 -07006492 * usage count and then drop tasklist_lock.
6493 */
6494 get_task_struct(p);
6495 read_unlock(&tasklist_lock);
6496
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306497 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6498 retval = -ENOMEM;
6499 goto out_put_task;
6500 }
6501 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
6502 retval = -ENOMEM;
6503 goto out_free_cpus_allowed;
6504 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006505 retval = -EPERM;
David Howellsc69e8d92008-11-14 10:39:19 +11006506 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006507 goto out_unlock;
6508
David Quigleye7834f82006-06-23 02:03:59 -07006509 retval = security_task_setscheduler(p, 0, NULL);
6510 if (retval)
6511 goto out_unlock;
6512
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306513 cpuset_cpus_allowed(p, cpus_allowed);
6514 cpumask_and(new_mask, in_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07006515 again:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306516 retval = set_cpus_allowed_ptr(p, new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006517
Paul Menage8707d8b2007-10-18 23:40:22 -07006518 if (!retval) {
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306519 cpuset_cpus_allowed(p, cpus_allowed);
6520 if (!cpumask_subset(new_mask, cpus_allowed)) {
Paul Menage8707d8b2007-10-18 23:40:22 -07006521 /*
6522 * We must have raced with a concurrent cpuset
6523 * update. Just reset the cpus_allowed to the
6524 * cpuset's cpus_allowed
6525 */
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306526 cpumask_copy(new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07006527 goto again;
6528 }
6529 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006530out_unlock:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306531 free_cpumask_var(new_mask);
6532out_free_cpus_allowed:
6533 free_cpumask_var(cpus_allowed);
6534out_put_task:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006535 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006536 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006537 return retval;
6538}
6539
6540static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
Rusty Russell96f874e2008-11-25 02:35:14 +10306541 struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006542{
Rusty Russell96f874e2008-11-25 02:35:14 +10306543 if (len < cpumask_size())
6544 cpumask_clear(new_mask);
6545 else if (len > cpumask_size())
6546 len = cpumask_size();
6547
Linus Torvalds1da177e2005-04-16 15:20:36 -07006548 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
6549}
6550
6551/**
6552 * sys_sched_setaffinity - set the cpu affinity of a process
6553 * @pid: pid of the process
6554 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
6555 * @user_mask_ptr: user-space pointer to the new cpu mask
6556 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006557SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
6558 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006559{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306560 cpumask_var_t new_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006561 int retval;
6562
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306563 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
6564 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006565
Rusty Russell5a16f3d2008-11-25 02:35:11 +10306566 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
6567 if (retval == 0)
6568 retval = sched_setaffinity(pid, new_mask);
6569 free_cpumask_var(new_mask);
6570 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006571}
6572
Rusty Russell96f874e2008-11-25 02:35:14 +10306573long sched_getaffinity(pid_t pid, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006574{
Ingo Molnar36c8b582006-07-03 00:25:41 -07006575 struct task_struct *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006576 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006577
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006578 get_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006579 read_lock(&tasklist_lock);
6580
6581 retval = -ESRCH;
6582 p = find_process_by_pid(pid);
6583 if (!p)
6584 goto out_unlock;
6585
David Quigleye7834f82006-06-23 02:03:59 -07006586 retval = security_task_getscheduler(p);
6587 if (retval)
6588 goto out_unlock;
6589
Rusty Russell96f874e2008-11-25 02:35:14 +10306590 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006591
6592out_unlock:
6593 read_unlock(&tasklist_lock);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006594 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006595
Ulrich Drepper9531b622007-08-09 11:16:46 +02006596 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006597}
6598
6599/**
6600 * sys_sched_getaffinity - get the cpu affinity of a process
6601 * @pid: pid of the process
6602 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
6603 * @user_mask_ptr: user-space pointer to hold the current cpu mask
6604 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006605SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
6606 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006607{
6608 int ret;
Rusty Russellf17c8602008-11-25 02:35:11 +10306609 cpumask_var_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006610
Rusty Russellf17c8602008-11-25 02:35:11 +10306611 if (len < cpumask_size())
Linus Torvalds1da177e2005-04-16 15:20:36 -07006612 return -EINVAL;
6613
Rusty Russellf17c8602008-11-25 02:35:11 +10306614 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
6615 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006616
Rusty Russellf17c8602008-11-25 02:35:11 +10306617 ret = sched_getaffinity(pid, mask);
6618 if (ret == 0) {
6619 if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
6620 ret = -EFAULT;
6621 else
6622 ret = cpumask_size();
6623 }
6624 free_cpumask_var(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625
Rusty Russellf17c8602008-11-25 02:35:11 +10306626 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006627}
6628
6629/**
6630 * sys_sched_yield - yield the current processor to other threads.
6631 *
Ingo Molnardd41f592007-07-09 18:51:59 +02006632 * This function yields the current CPU to other tasks. If there are no
6633 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006634 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006635SYSCALL_DEFINE0(sched_yield)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006636{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006637 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006638
Ingo Molnar2d723762007-10-15 17:00:12 +02006639 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02006640 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006641
6642 /*
6643 * Since we are going to call schedule() anyway, there's
6644 * no need to preempt or enable interrupts:
6645 */
6646 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07006647 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006648 _raw_spin_unlock(&rq->lock);
6649 preempt_enable_no_resched();
6650
6651 schedule();
6652
6653 return 0;
6654}
6655
Peter Zijlstrad86ee482009-07-10 14:57:57 +02006656static inline int should_resched(void)
6657{
6658 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
6659}
6660
Andrew Mortone7b38402006-06-30 01:56:00 -07006661static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006662{
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02006663 add_preempt_count(PREEMPT_ACTIVE);
6664 schedule();
6665 sub_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006666}
6667
Herbert Xu02b67cc2008-01-25 21:08:28 +01006668int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006669{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02006670 if (should_resched()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006671 __cond_resched();
6672 return 1;
6673 }
6674 return 0;
6675}
Herbert Xu02b67cc2008-01-25 21:08:28 +01006676EXPORT_SYMBOL(_cond_resched);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006677
6678/*
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02006679 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006680 * call schedule, and on return reacquire the lock.
6681 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006682 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07006683 * operations here to prevent schedule() from being called twice (once via
6684 * spin_unlock(), once by hand).
6685 */
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02006686int __cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006687{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02006688 int resched = should_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07006689 int ret = 0;
6690
Peter Zijlstraf607c662009-07-20 19:16:29 +02006691 lockdep_assert_held(lock);
6692
Nick Piggin95c354f2008-01-30 13:31:20 +01006693 if (spin_needbreak(lock) || resched) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006694 spin_unlock(lock);
Peter Zijlstrad86ee482009-07-10 14:57:57 +02006695 if (resched)
Nick Piggin95c354f2008-01-30 13:31:20 +01006696 __cond_resched();
6697 else
6698 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07006699 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006700 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006701 }
Jan Kara6df3cec2005-06-13 15:52:32 -07006702 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006703}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02006704EXPORT_SYMBOL(__cond_resched_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006705
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02006706int __sched __cond_resched_softirq(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006707{
6708 BUG_ON(!in_softirq());
6709
Peter Zijlstrad86ee482009-07-10 14:57:57 +02006710 if (should_resched()) {
Thomas Gleixner98d82562007-05-23 13:58:18 -07006711 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006712 __cond_resched();
6713 local_bh_disable();
6714 return 1;
6715 }
6716 return 0;
6717}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02006718EXPORT_SYMBOL(__cond_resched_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006719
Linus Torvalds1da177e2005-04-16 15:20:36 -07006720/**
6721 * yield - yield the current processor to other threads.
6722 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08006723 * This is a shortcut for kernel-space yielding - it marks the
Linus Torvalds1da177e2005-04-16 15:20:36 -07006724 * thread runnable and calls sys_sched_yield().
6725 */
6726void __sched yield(void)
6727{
6728 set_current_state(TASK_RUNNING);
6729 sys_sched_yield();
6730}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006731EXPORT_SYMBOL(yield);
6732
6733/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006734 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07006735 * that process accounting knows that this is a task in IO wait state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006736 */
6737void __sched io_schedule(void)
6738{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09006739 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006740
Shailabh Nagar0ff92242006-07-14 00:24:37 -07006741 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006742 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07006743 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006744 schedule();
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07006745 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006746 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07006747 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006748}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006749EXPORT_SYMBOL(io_schedule);
6750
6751long __sched io_schedule_timeout(long timeout)
6752{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09006753 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006754 long ret;
6755
Shailabh Nagar0ff92242006-07-14 00:24:37 -07006756 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006757 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07006758 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006759 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07006760 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006761 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07006762 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006763 return ret;
6764}
6765
6766/**
6767 * sys_sched_get_priority_max - return maximum RT priority.
6768 * @policy: scheduling class.
6769 *
6770 * this syscall returns the maximum rt_priority that can be used
6771 * by a given scheduling class.
6772 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006773SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006774{
6775 int ret = -EINVAL;
6776
6777 switch (policy) {
6778 case SCHED_FIFO:
6779 case SCHED_RR:
6780 ret = MAX_USER_RT_PRIO-1;
6781 break;
6782 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08006783 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02006784 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006785 ret = 0;
6786 break;
6787 }
6788 return ret;
6789}
6790
6791/**
6792 * sys_sched_get_priority_min - return minimum RT priority.
6793 * @policy: scheduling class.
6794 *
6795 * this syscall returns the minimum rt_priority that can be used
6796 * by a given scheduling class.
6797 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01006798SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006799{
6800 int ret = -EINVAL;
6801
6802 switch (policy) {
6803 case SCHED_FIFO:
6804 case SCHED_RR:
6805 ret = 1;
6806 break;
6807 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08006808 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02006809 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006810 ret = 0;
6811 }
6812 return ret;
6813}
6814
6815/**
6816 * sys_sched_rr_get_interval - return the default timeslice of a process.
6817 * @pid: pid of the process.
6818 * @interval: userspace pointer to the timeslice value.
6819 *
6820 * this syscall writes the default timeslice value of a given process
6821 * into the user-space timespec buffer. A value of '0' means infinity.
6822 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01006823SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01006824 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006825{
Ingo Molnar36c8b582006-07-03 00:25:41 -07006826 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02006827 unsigned int time_slice;
Andi Kleen3a5c3592007-10-15 17:00:14 +02006828 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006829 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006830
6831 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02006832 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006833
6834 retval = -ESRCH;
6835 read_lock(&tasklist_lock);
6836 p = find_process_by_pid(pid);
6837 if (!p)
6838 goto out_unlock;
6839
6840 retval = security_task_getscheduler(p);
6841 if (retval)
6842 goto out_unlock;
6843
Peter Williams0d721ce2009-09-21 01:31:53 +00006844 time_slice = p->sched_class->get_rr_interval(p);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02006845
Linus Torvalds1da177e2005-04-16 15:20:36 -07006846 read_unlock(&tasklist_lock);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02006847 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006848 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006849 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02006850
Linus Torvalds1da177e2005-04-16 15:20:36 -07006851out_unlock:
6852 read_unlock(&tasklist_lock);
6853 return retval;
6854}
6855
Steven Rostedt7c731e02008-05-12 21:20:41 +02006856static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07006857
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01006858void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006859{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006860 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07006861 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006862
Linus Torvalds1da177e2005-04-16 15:20:36 -07006863 state = p->state ? __ffs(p->state) + 1 : 0;
Ingo Molnarcc4ea792007-10-18 21:32:56 +02006864 printk(KERN_INFO "%-13.13s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07006865 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02006866#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07006867 if (state == TASK_RUNNING)
Ingo Molnarcc4ea792007-10-18 21:32:56 +02006868 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006869 else
Ingo Molnarcc4ea792007-10-18 21:32:56 +02006870 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006871#else
6872 if (state == TASK_RUNNING)
Ingo Molnarcc4ea792007-10-18 21:32:56 +02006873 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006874 else
Ingo Molnarcc4ea792007-10-18 21:32:56 +02006875 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006876#endif
6877#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05006878 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006879#endif
David Rientjesaa47b7e2009-05-04 01:38:05 -07006880 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
6881 task_pid_nr(p), task_pid_nr(p->real_parent),
6882 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006883
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01006884 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006885}
6886
Ingo Molnare59e2ae2006-12-06 20:35:59 -08006887void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006888{
Ingo Molnar36c8b582006-07-03 00:25:41 -07006889 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006890
Ingo Molnar4bd77322007-07-11 21:21:47 +02006891#if BITS_PER_LONG == 32
6892 printk(KERN_INFO
6893 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006894#else
Ingo Molnar4bd77322007-07-11 21:21:47 +02006895 printk(KERN_INFO
6896 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006897#endif
6898 read_lock(&tasklist_lock);
6899 do_each_thread(g, p) {
6900 /*
6901 * reset the NMI-timeout, listing all files on a slow
6902 * console might take alot of time:
6903 */
6904 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07006905 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01006906 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006907 } while_each_thread(g, p);
6908
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07006909 touch_all_softlockup_watchdogs();
6910
Ingo Molnardd41f592007-07-09 18:51:59 +02006911#ifdef CONFIG_SCHED_DEBUG
6912 sysrq_sched_debug_show();
6913#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08006915 /*
6916 * Only show locks if all tasks are dumped:
6917 */
6918 if (state_filter == -1)
6919 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006920}
6921
Ingo Molnar1df21052007-07-09 18:51:58 +02006922void __cpuinit init_idle_bootup_task(struct task_struct *idle)
6923{
Ingo Molnardd41f592007-07-09 18:51:59 +02006924 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02006925}
6926
Ingo Molnarf340c0d2005-06-28 16:40:42 +02006927/**
6928 * init_idle - set up an idle thread for a given CPU
6929 * @idle: task in question
6930 * @cpu: cpu the idle task belongs to
6931 *
6932 * NOTE: this function does not set the idle thread's NEED_RESCHED
6933 * flag, to make booting more robust.
6934 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07006935void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006936{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006937 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006938 unsigned long flags;
6939
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01006940 spin_lock_irqsave(&rq->lock, flags);
6941
Ingo Molnardd41f592007-07-09 18:51:59 +02006942 __sched_fork(idle);
6943 idle->se.exec_start = sched_clock();
6944
Ingo Molnarb29739f2006-06-27 02:54:51 -07006945 idle->prio = idle->normal_prio = MAX_PRIO;
Rusty Russell96f874e2008-11-25 02:35:14 +10306946 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
Ingo Molnardd41f592007-07-09 18:51:59 +02006947 __set_task_cpu(idle, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006948
Linus Torvalds1da177e2005-04-16 15:20:36 -07006949 rq->curr = rq->idle = idle;
Nick Piggin4866cde2005-06-25 14:57:23 -07006950#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
6951 idle->oncpu = 1;
6952#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006953 spin_unlock_irqrestore(&rq->lock, flags);
6954
6955 /* Set the preempt count _outside_ the spinlocks! */
Linus Torvalds8e3e0762008-05-10 20:58:02 -07006956#if defined(CONFIG_PREEMPT)
6957 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
6958#else
Al Viroa1261f52005-11-13 16:06:55 -08006959 task_thread_info(idle)->preempt_count = 0;
Linus Torvalds8e3e0762008-05-10 20:58:02 -07006960#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02006961 /*
6962 * The idle tasks have their own, simple scheduling class:
6963 */
6964 idle->sched_class = &idle_sched_class;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01006965 ftrace_graph_init_task(idle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006966}
6967
6968/*
6969 * In a system that switches off the HZ timer nohz_cpu_mask
6970 * indicates which cpus entered this state. This is used
6971 * in the rcu update to wait only for active cpus. For system
6972 * which do not switch off the HZ timer nohz_cpu_mask should
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10306973 * always be CPU_BITS_NONE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006974 */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10306975cpumask_var_t nohz_cpu_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006976
Ingo Molnar19978ca2007-11-09 22:39:38 +01006977/*
6978 * Increase the granularity value when there are more CPUs,
6979 * because with more CPUs the 'effective latency' as visible
6980 * to users decreases. But the relationship is not linear,
6981 * so pick a second-best guess by going with the log2 of the
6982 * number of CPUs.
6983 *
6984 * This idea comes from the SD scheduler of Con Kolivas:
6985 */
6986static inline void sched_init_granularity(void)
6987{
6988 unsigned int factor = 1 + ilog2(num_online_cpus());
6989 const unsigned long limit = 200000000;
6990
6991 sysctl_sched_min_granularity *= factor;
6992 if (sysctl_sched_min_granularity > limit)
6993 sysctl_sched_min_granularity = limit;
6994
6995 sysctl_sched_latency *= factor;
6996 if (sysctl_sched_latency > limit)
6997 sysctl_sched_latency = limit;
6998
6999 sysctl_sched_wakeup_granularity *= factor;
Peter Zijlstra55cd5342008-08-04 08:54:26 +02007000
7001 sysctl_sched_shares_ratelimit *= factor;
Ingo Molnar19978ca2007-11-09 22:39:38 +01007002}
7003
Linus Torvalds1da177e2005-04-16 15:20:36 -07007004#ifdef CONFIG_SMP
7005/*
7006 * This is how migration works:
7007 *
Ingo Molnar70b97a72006-07-03 00:25:42 -07007008 * 1) we queue a struct migration_req structure in the source CPU's
Linus Torvalds1da177e2005-04-16 15:20:36 -07007009 * runqueue and wake up that CPU's migration thread.
7010 * 2) we down() the locked semaphore => thread blocks.
7011 * 3) migration thread wakes up (implicitly it forces the migrated
7012 * thread off the CPU)
7013 * 4) it gets the migration request and checks whether the migrated
7014 * task is still in the wrong runqueue.
7015 * 5) if it's in the wrong runqueue then the migration thread removes
7016 * it and puts it into the right queue.
7017 * 6) migration thread up()s the semaphore.
7018 * 7) we wake up and the migration is done.
7019 */
7020
7021/*
7022 * Change a given task's CPU affinity. Migrate the thread to a
7023 * proper CPU and schedule it away if the CPU it's executing on
7024 * is removed from the allowed bitmask.
7025 *
7026 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007027 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07007028 * call is not atomic; no spinlocks may be held.
7029 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307030int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007031{
Ingo Molnar70b97a72006-07-03 00:25:42 -07007032 struct migration_req req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007033 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07007034 struct rq *rq;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007035 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007036
7037 rq = task_rq_lock(p, &flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10307038 if (!cpumask_intersects(new_mask, cpu_online_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007039 ret = -EINVAL;
7040 goto out;
7041 }
7042
David Rientjes9985b0b2008-06-05 12:57:11 -07007043 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
Rusty Russell96f874e2008-11-25 02:35:14 +10307044 !cpumask_equal(&p->cpus_allowed, new_mask))) {
David Rientjes9985b0b2008-06-05 12:57:11 -07007045 ret = -EINVAL;
7046 goto out;
7047 }
7048
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01007049 if (p->sched_class->set_cpus_allowed)
Mike Traviscd8ba7c2008-03-26 14:23:49 -07007050 p->sched_class->set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01007051 else {
Rusty Russell96f874e2008-11-25 02:35:14 +10307052 cpumask_copy(&p->cpus_allowed, new_mask);
7053 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01007054 }
7055
Linus Torvalds1da177e2005-04-16 15:20:36 -07007056 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10307057 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007058 goto out;
7059
Rusty Russell1e5ce4f2008-11-25 02:35:03 +10307060 if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007061 /* Need help from migration thread: drop lock and wait. */
Peter Zijlstra693525e2009-07-21 13:56:38 +02007062 struct task_struct *mt = rq->migration_thread;
7063
7064 get_task_struct(mt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007065 task_rq_unlock(rq, &flags);
7066 wake_up_process(rq->migration_thread);
Peter Zijlstra693525e2009-07-21 13:56:38 +02007067 put_task_struct(mt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007068 wait_for_completion(&req.done);
7069 tlb_migrate_finish(p->mm);
7070 return 0;
7071 }
7072out:
7073 task_rq_unlock(rq, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07007074
Linus Torvalds1da177e2005-04-16 15:20:36 -07007075 return ret;
7076}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07007077EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007078
7079/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007080 * Move (not current) task off this cpu, onto dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07007081 * this because either it can't run here any more (set_cpus_allowed()
7082 * away from this CPU, or CPU going down), or because we're
7083 * attempting to rebalance this task on exec (sched_exec).
7084 *
7085 * So we race with normal scheduler movements, but that's OK, as long
7086 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07007087 *
7088 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007089 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07007090static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007091{
Ingo Molnar70b97a72006-07-03 00:25:42 -07007092 struct rq *rq_dest, *rq_src;
Ingo Molnardd41f592007-07-09 18:51:59 +02007093 int ret = 0, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007094
Max Krasnyanskye761b772008-07-15 04:43:49 -07007095 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07007096 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007097
7098 rq_src = cpu_rq(src_cpu);
7099 rq_dest = cpu_rq(dest_cpu);
7100
7101 double_rq_lock(rq_src, rq_dest);
7102 /* Already moved. */
7103 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07007104 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007105 /* Affinity changed (again). */
Rusty Russell96f874e2008-11-25 02:35:14 +10307106 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07007107 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007108
Ingo Molnardd41f592007-07-09 18:51:59 +02007109 on_rq = p->se.on_rq;
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02007110 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02007111 deactivate_task(rq_src, p, 0);
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02007112
Linus Torvalds1da177e2005-04-16 15:20:36 -07007113 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02007114 if (on_rq) {
7115 activate_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02007116 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07007118done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07007119 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07007120fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007121 double_rq_unlock(rq_src, rq_dest);
Kirill Korotaevefc30812006-06-27 02:54:32 -07007122 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007123}
7124
Paul E. McKenney03b042b2009-06-25 09:08:16 -07007125#define RCU_MIGRATION_IDLE 0
7126#define RCU_MIGRATION_NEED_QS 1
7127#define RCU_MIGRATION_GOT_QS 2
7128#define RCU_MIGRATION_MUST_SYNC 3
7129
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130/*
7131 * migration_thread - this is a highprio system thread that performs
7132 * thread migration by bumping thread off CPU then 'pushing' onto
7133 * another runqueue.
7134 */
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07007135static int migration_thread(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007136{
Paul E. McKenney03b042b2009-06-25 09:08:16 -07007137 int badcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007138 int cpu = (long)data;
Ingo Molnar70b97a72006-07-03 00:25:42 -07007139 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007140
7141 rq = cpu_rq(cpu);
7142 BUG_ON(rq->migration_thread != current);
7143
7144 set_current_state(TASK_INTERRUPTIBLE);
7145 while (!kthread_should_stop()) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07007146 struct migration_req *req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007147 struct list_head *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007148
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149 spin_lock_irq(&rq->lock);
7150
7151 if (cpu_is_offline(cpu)) {
7152 spin_unlock_irq(&rq->lock);
Oleg Nesterov371cbb32009-06-17 16:27:45 -07007153 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007154 }
7155
7156 if (rq->active_balance) {
7157 active_load_balance(rq, cpu);
7158 rq->active_balance = 0;
7159 }
7160
7161 head = &rq->migration_queue;
7162
7163 if (list_empty(head)) {
7164 spin_unlock_irq(&rq->lock);
7165 schedule();
7166 set_current_state(TASK_INTERRUPTIBLE);
7167 continue;
7168 }
Ingo Molnar70b97a72006-07-03 00:25:42 -07007169 req = list_entry(head->next, struct migration_req, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170 list_del_init(head->next);
7171
Paul E. McKenney03b042b2009-06-25 09:08:16 -07007172 if (req->task != NULL) {
7173 spin_unlock(&rq->lock);
7174 __migrate_task(req->task, cpu, req->dest_cpu);
7175 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
7176 req->dest_cpu = RCU_MIGRATION_GOT_QS;
7177 spin_unlock(&rq->lock);
7178 } else {
7179 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
7180 spin_unlock(&rq->lock);
7181 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
7182 }
Nick Piggin674311d2005-06-25 14:57:27 -07007183 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007184
7185 complete(&req->done);
7186 }
7187 __set_current_state(TASK_RUNNING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007188
Linus Torvalds1da177e2005-04-16 15:20:36 -07007189 return 0;
7190}
7191
7192#ifdef CONFIG_HOTPLUG_CPU
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07007193
7194static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
7195{
7196 int ret;
7197
7198 local_irq_disable();
7199 ret = __migrate_task(p, src_cpu, dest_cpu);
7200 local_irq_enable();
7201 return ret;
7202}
7203
Kirill Korotaev054b9102006-12-10 02:20:11 -08007204/*
Robert P. J. Day3a4fa0a2007-10-19 23:10:43 +02007205 * Figure out where task on dead CPU should go, use force if necessary.
Kirill Korotaev054b9102006-12-10 02:20:11 -08007206 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07007207static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007208{
Ingo Molnar70b97a72006-07-03 00:25:42 -07007209 int dest_cpu;
Mike Travis6ca09df2008-12-31 18:08:45 -08007210 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007211
Rusty Russelle76bd8d2008-11-25 02:35:11 +10307212again:
7213 /* Look for allowed, online CPU in same node. */
7214 for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
7215 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7216 goto move;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217
Rusty Russelle76bd8d2008-11-25 02:35:11 +10307218 /* Any allowed, online CPU? */
7219 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
7220 if (dest_cpu < nr_cpu_ids)
7221 goto move;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007222
Rusty Russelle76bd8d2008-11-25 02:35:11 +10307223 /* No more Mr. Nice Guy. */
7224 if (dest_cpu >= nr_cpu_ids) {
Rusty Russelle76bd8d2008-11-25 02:35:11 +10307225 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
7226 dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
Mike Travisf9a86fc2008-04-04 18:11:07 -07007227
Rusty Russelle76bd8d2008-11-25 02:35:11 +10307228 /*
7229 * Don't tell them about moving exiting tasks or
7230 * kernel threads (both mm NULL), since they never
7231 * leave kernel.
7232 */
7233 if (p->mm && printk_ratelimit()) {
7234 printk(KERN_INFO "process %d (%s) no "
7235 "longer affine to cpu%d\n",
7236 task_pid_nr(p), p->comm, dead_cpu);
Andi Kleen3a5c3592007-10-15 17:00:14 +02007237 }
Rusty Russelle76bd8d2008-11-25 02:35:11 +10307238 }
7239
7240move:
7241 /* It can have affinity changed while we were choosing. */
7242 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
7243 goto again;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007244}
7245
7246/*
7247 * While a dead CPU has no uninterruptible tasks queued at this point,
7248 * it might still have a nonzero ->nr_uninterruptible counter, because
7249 * for performance reasons the counter is not stricly tracking tasks to
7250 * their home CPUs. So we just add the counter to another CPU's counter,
7251 * to keep the global sum constant after CPU-down:
7252 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07007253static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007254{
Rusty Russell1e5ce4f2008-11-25 02:35:03 +10307255 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007256 unsigned long flags;
7257
7258 local_irq_save(flags);
7259 double_rq_lock(rq_src, rq_dest);
7260 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
7261 rq_src->nr_uninterruptible = 0;
7262 double_rq_unlock(rq_src, rq_dest);
7263 local_irq_restore(flags);
7264}
7265
7266/* Run through task list and migrate tasks from the dead cpu. */
7267static void migrate_live_tasks(int src_cpu)
7268{
Ingo Molnar48f24c42006-07-03 00:25:40 -07007269 struct task_struct *p, *t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007270
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07007271 read_lock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007272
Ingo Molnar48f24c42006-07-03 00:25:40 -07007273 do_each_thread(t, p) {
7274 if (p == current)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007275 continue;
7276
Ingo Molnar48f24c42006-07-03 00:25:40 -07007277 if (task_cpu(p) == src_cpu)
7278 move_task_off_dead_cpu(src_cpu, p);
7279 } while_each_thread(t, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007280
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07007281 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007282}
7283
Ingo Molnardd41f592007-07-09 18:51:59 +02007284/*
7285 * Schedules idle task to be the next runnable task on current CPU.
Dmitry Adamushko94bc9a72007-11-15 20:57:40 +01007286 * It does so by boosting its priority to highest possible.
7287 * Used by CPU offline code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007288 */
7289void sched_idle_next(void)
7290{
Ingo Molnar48f24c42006-07-03 00:25:40 -07007291 int this_cpu = smp_processor_id();
Ingo Molnar70b97a72006-07-03 00:25:42 -07007292 struct rq *rq = cpu_rq(this_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007293 struct task_struct *p = rq->idle;
7294 unsigned long flags;
7295
7296 /* cpu has to be offline */
Ingo Molnar48f24c42006-07-03 00:25:40 -07007297 BUG_ON(cpu_online(this_cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007298
Ingo Molnar48f24c42006-07-03 00:25:40 -07007299 /*
7300 * Strictly not necessary since rest of the CPUs are stopped by now
7301 * and interrupts disabled on the current cpu.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007302 */
7303 spin_lock_irqsave(&rq->lock, flags);
7304
Ingo Molnardd41f592007-07-09 18:51:59 +02007305 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
Ingo Molnar48f24c42006-07-03 00:25:40 -07007306
Dmitry Adamushko94bc9a72007-11-15 20:57:40 +01007307 update_rq_clock(rq);
7308 activate_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007309
7310 spin_unlock_irqrestore(&rq->lock, flags);
7311}
7312
Ingo Molnar48f24c42006-07-03 00:25:40 -07007313/*
7314 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07007315 * offline.
7316 */
7317void idle_task_exit(void)
7318{
7319 struct mm_struct *mm = current->active_mm;
7320
7321 BUG_ON(cpu_online(smp_processor_id()));
7322
7323 if (mm != &init_mm)
7324 switch_mm(mm, &init_mm, current);
7325 mmdrop(mm);
7326}
7327
Kirill Korotaev054b9102006-12-10 02:20:11 -08007328/* called under rq->lock with disabled interrupts */
Ingo Molnar36c8b582006-07-03 00:25:41 -07007329static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007330{
Ingo Molnar70b97a72006-07-03 00:25:42 -07007331 struct rq *rq = cpu_rq(dead_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007332
7333 /* Must be exiting, otherwise would be on tasklist. */
Eugene Teo270f7222007-10-18 23:40:38 -07007334 BUG_ON(!p->exit_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007335
7336 /* Cannot have done final schedule yet: would have vanished. */
Oleg Nesterovc394cc92006-09-29 02:01:11 -07007337 BUG_ON(p->state == TASK_DEAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007338
Ingo Molnar48f24c42006-07-03 00:25:40 -07007339 get_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007340
7341 /*
7342 * Drop lock around migration; if someone else moves it,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007343 * that's OK. No task can be added to this CPU, so iteration is
Linus Torvalds1da177e2005-04-16 15:20:36 -07007344 * fine.
7345 */
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07007346 spin_unlock_irq(&rq->lock);
Ingo Molnar48f24c42006-07-03 00:25:40 -07007347 move_task_off_dead_cpu(dead_cpu, p);
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07007348 spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007349
Ingo Molnar48f24c42006-07-03 00:25:40 -07007350 put_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007351}
7352
7353/* release_task() removes task from tasklist, so we won't find dead tasks. */
7354static void migrate_dead_tasks(unsigned int dead_cpu)
7355{
Ingo Molnar70b97a72006-07-03 00:25:42 -07007356 struct rq *rq = cpu_rq(dead_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02007357 struct task_struct *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007358
Ingo Molnardd41f592007-07-09 18:51:59 +02007359 for ( ; ; ) {
7360 if (!rq->nr_running)
7361 break;
Ingo Molnara8e504d2007-08-09 11:16:47 +02007362 update_rq_clock(rq);
Wang Chenb67802e2009-03-02 13:55:26 +08007363 next = pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02007364 if (!next)
7365 break;
Dmitry Adamushko79c53792008-06-29 00:16:56 +02007366 next->sched_class->put_prev_task(rq, next);
Ingo Molnardd41f592007-07-09 18:51:59 +02007367 migrate_dead(dead_cpu, next);
Nick Piggine692ab52007-07-26 13:40:43 +02007368
Linus Torvalds1da177e2005-04-16 15:20:36 -07007369 }
7370}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02007371
7372/*
7373 * remove the tasks which were accounted by rq from calc_load_tasks.
7374 */
7375static void calc_global_load_remove(struct rq *rq)
7376{
7377 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
Thomas Gleixnera468d382009-07-17 14:15:46 +02007378 rq->calc_load_active = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02007379}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007380#endif /* CONFIG_HOTPLUG_CPU */
7381
Nick Piggine692ab52007-07-26 13:40:43 +02007382#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
7383
7384static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02007385 {
7386 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02007387 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02007388 },
Ingo Molnar38605ca2007-10-29 21:18:11 +01007389 {0, },
Nick Piggine692ab52007-07-26 13:40:43 +02007390};
7391
7392static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02007393 {
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02007394 .ctl_name = CTL_KERN,
Alexey Dobriyane0361852007-08-09 11:16:46 +02007395 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02007396 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02007397 .child = sd_ctl_dir,
7398 },
Ingo Molnar38605ca2007-10-29 21:18:11 +01007399 {0, },
Nick Piggine692ab52007-07-26 13:40:43 +02007400};
7401
7402static struct ctl_table *sd_alloc_ctl_entry(int n)
7403{
7404 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02007405 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02007406
Nick Piggine692ab52007-07-26 13:40:43 +02007407 return entry;
7408}
7409
Milton Miller6382bc92007-10-15 17:00:19 +02007410static void sd_free_ctl_entry(struct ctl_table **tablep)
7411{
Milton Millercd790072007-10-17 16:55:11 +02007412 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02007413
Milton Millercd790072007-10-17 16:55:11 +02007414 /*
7415 * In the intermediate directories, both the child directory and
7416 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007417 * will always be set. In the lowest directory the names are
Milton Millercd790072007-10-17 16:55:11 +02007418 * static strings and all have proc handlers.
7419 */
7420 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02007421 if (entry->child)
7422 sd_free_ctl_entry(&entry->child);
Milton Millercd790072007-10-17 16:55:11 +02007423 if (entry->proc_handler == NULL)
7424 kfree(entry->procname);
7425 }
Milton Miller6382bc92007-10-15 17:00:19 +02007426
7427 kfree(*tablep);
7428 *tablep = NULL;
7429}
7430
Nick Piggine692ab52007-07-26 13:40:43 +02007431static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02007432set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02007433 const char *procname, void *data, int maxlen,
7434 mode_t mode, proc_handler *proc_handler)
7435{
Nick Piggine692ab52007-07-26 13:40:43 +02007436 entry->procname = procname;
7437 entry->data = data;
7438 entry->maxlen = maxlen;
7439 entry->mode = mode;
7440 entry->proc_handler = proc_handler;
7441}
7442
7443static struct ctl_table *
7444sd_alloc_ctl_domain_table(struct sched_domain *sd)
7445{
Ingo Molnara5d8c342008-10-09 11:35:51 +02007446 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02007447
Milton Millerad1cdc12007-10-15 17:00:19 +02007448 if (table == NULL)
7449 return NULL;
7450
Alexey Dobriyane0361852007-08-09 11:16:46 +02007451 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02007452 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02007453 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02007454 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02007455 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02007456 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02007457 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02007458 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02007459 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02007460 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02007461 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02007462 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02007463 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02007464 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02007465 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02007466 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02007467 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02007468 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02007469 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02007470 &sd->cache_nice_tries,
7471 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02007472 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02007473 sizeof(int), 0644, proc_dointvec_minmax);
Ingo Molnara5d8c342008-10-09 11:35:51 +02007474 set_table_entry(&table[11], "name", sd->name,
7475 CORENAME_MAX_SIZE, 0444, proc_dostring);
7476 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02007477
7478 return table;
7479}
7480
Ingo Molnar9a4e7152007-11-28 15:52:56 +01007481static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02007482{
7483 struct ctl_table *entry, *table;
7484 struct sched_domain *sd;
7485 int domain_num = 0, i;
7486 char buf[32];
7487
7488 for_each_domain(cpu, sd)
7489 domain_num++;
7490 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02007491 if (table == NULL)
7492 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02007493
7494 i = 0;
7495 for_each_domain(cpu, sd) {
7496 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02007497 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02007498 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02007499 entry->child = sd_alloc_ctl_domain_table(sd);
7500 entry++;
7501 i++;
7502 }
7503 return table;
7504}
7505
7506static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02007507static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02007508{
7509 int i, cpu_num = num_online_cpus();
7510 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
7511 char buf[32];
7512
Milton Miller73785472007-10-24 18:23:48 +02007513 WARN_ON(sd_ctl_dir[0].child);
7514 sd_ctl_dir[0].child = entry;
7515
Milton Millerad1cdc12007-10-15 17:00:19 +02007516 if (entry == NULL)
7517 return;
7518
Milton Miller97b6ea72007-10-15 17:00:19 +02007519 for_each_online_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02007520 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02007521 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02007522 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02007523 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02007524 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02007525 }
Milton Miller73785472007-10-24 18:23:48 +02007526
7527 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02007528 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
7529}
Milton Miller6382bc92007-10-15 17:00:19 +02007530
Milton Miller73785472007-10-24 18:23:48 +02007531/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02007532static void unregister_sched_domain_sysctl(void)
7533{
Milton Miller73785472007-10-24 18:23:48 +02007534 if (sd_sysctl_header)
7535 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02007536 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02007537 if (sd_ctl_dir[0].child)
7538 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02007539}
Nick Piggine692ab52007-07-26 13:40:43 +02007540#else
Milton Miller6382bc92007-10-15 17:00:19 +02007541static void register_sched_domain_sysctl(void)
7542{
7543}
7544static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02007545{
7546}
7547#endif
7548
Gregory Haskins1f11eb62008-06-04 15:04:05 -04007549static void set_rq_online(struct rq *rq)
7550{
7551 if (!rq->online) {
7552 const struct sched_class *class;
7553
Rusty Russellc6c49272008-11-25 02:35:05 +10307554 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04007555 rq->online = 1;
7556
7557 for_each_class(class) {
7558 if (class->rq_online)
7559 class->rq_online(rq);
7560 }
7561 }
7562}
7563
7564static void set_rq_offline(struct rq *rq)
7565{
7566 if (rq->online) {
7567 const struct sched_class *class;
7568
7569 for_each_class(class) {
7570 if (class->rq_offline)
7571 class->rq_offline(rq);
7572 }
7573
Rusty Russellc6c49272008-11-25 02:35:05 +10307574 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04007575 rq->online = 0;
7576 }
7577}
7578
Linus Torvalds1da177e2005-04-16 15:20:36 -07007579/*
7580 * migration_call - callback that gets triggered when a CPU is added.
7581 * Here we can start up the necessary migration thread for the new CPU.
7582 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07007583static int __cpuinit
7584migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007585{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007586 struct task_struct *p;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007587 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007588 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07007589 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007590
7591 switch (action) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07007592
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007594 case CPU_UP_PREPARE_FROZEN:
Ingo Molnardd41f592007-07-09 18:51:59 +02007595 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007596 if (IS_ERR(p))
7597 return NOTIFY_BAD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598 kthread_bind(p, cpu);
7599 /* Must be high prio: stop_machine expects to yield to it. */
7600 rq = task_rq_lock(p, &flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02007601 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007602 task_rq_unlock(rq, &flags);
Oleg Nesterov371cbb32009-06-17 16:27:45 -07007603 get_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007604 cpu_rq(cpu)->migration_thread = p;
Thomas Gleixnera468d382009-07-17 14:15:46 +02007605 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007606 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007607
Linus Torvalds1da177e2005-04-16 15:20:36 -07007608 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007609 case CPU_ONLINE_FROZEN:
Robert P. J. Day3a4fa0a2007-10-19 23:10:43 +02007610 /* Strictly unnecessary, as first user will wake it. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007611 wake_up_process(cpu_rq(cpu)->migration_thread);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04007612
7613 /* Update our root-domain */
7614 rq = cpu_rq(cpu);
7615 spin_lock_irqsave(&rq->lock, flags);
7616 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10307617 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04007618
7619 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04007620 }
7621 spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007622 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007623
Linus Torvalds1da177e2005-04-16 15:20:36 -07007624#ifdef CONFIG_HOTPLUG_CPU
7625 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007626 case CPU_UP_CANCELED_FROZEN:
Heiko Carstensfc75cdf2006-06-25 05:49:10 -07007627 if (!cpu_rq(cpu)->migration_thread)
7628 break;
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007629 /* Unbind it from offline cpu so it can run. Fall thru. */
Heiko Carstensa4c4af72005-11-07 00:58:38 -08007630 kthread_bind(cpu_rq(cpu)->migration_thread,
Rusty Russell1e5ce4f2008-11-25 02:35:03 +10307631 cpumask_any(cpu_online_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007632 kthread_stop(cpu_rq(cpu)->migration_thread);
Oleg Nesterov371cbb32009-06-17 16:27:45 -07007633 put_task_struct(cpu_rq(cpu)->migration_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007634 cpu_rq(cpu)->migration_thread = NULL;
7635 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007636
Linus Torvalds1da177e2005-04-16 15:20:36 -07007637 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007638 case CPU_DEAD_FROZEN:
Cliff Wickman470fd642007-10-18 23:40:46 -07007639 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007640 migrate_live_tasks(cpu);
7641 rq = cpu_rq(cpu);
7642 kthread_stop(rq->migration_thread);
Oleg Nesterov371cbb32009-06-17 16:27:45 -07007643 put_task_struct(rq->migration_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007644 rq->migration_thread = NULL;
7645 /* Idle task back to normal (off runqueue, low prio) */
Oleg Nesterovd2da2722007-10-16 23:30:56 -07007646 spin_lock_irq(&rq->lock);
Ingo Molnara8e504d2007-08-09 11:16:47 +02007647 update_rq_clock(rq);
Ingo Molnar2e1cb742007-08-09 11:16:49 +02007648 deactivate_task(rq, rq->idle, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007649 rq->idle->static_prio = MAX_PRIO;
Ingo Molnardd41f592007-07-09 18:51:59 +02007650 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
7651 rq->idle->sched_class = &idle_sched_class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007652 migrate_dead_tasks(cpu);
Oleg Nesterovd2da2722007-10-16 23:30:56 -07007653 spin_unlock_irq(&rq->lock);
Cliff Wickman470fd642007-10-18 23:40:46 -07007654 cpuset_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007655 migrate_nr_uninterruptible(rq);
7656 BUG_ON(rq->nr_running != 0);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02007657 calc_global_load_remove(rq);
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007658 /*
7659 * No need to migrate the tasks: it was best-effort if
7660 * they didn't take sched_hotcpu_mutex. Just wake up
7661 * the requestors.
7662 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007663 spin_lock_irq(&rq->lock);
7664 while (!list_empty(&rq->migration_queue)) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07007665 struct migration_req *req;
7666
Linus Torvalds1da177e2005-04-16 15:20:36 -07007667 req = list_entry(rq->migration_queue.next,
Ingo Molnar70b97a72006-07-03 00:25:42 -07007668 struct migration_req, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007669 list_del_init(&req->list);
Brian King9a2bd242008-12-09 08:47:00 -06007670 spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007671 complete(&req->done);
Brian King9a2bd242008-12-09 08:47:00 -06007672 spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007673 }
7674 spin_unlock_irq(&rq->lock);
7675 break;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007676
Gregory Haskins08f503b2008-03-10 17:59:11 -04007677 case CPU_DYING:
7678 case CPU_DYING_FROZEN:
Gregory Haskins57d885f2008-01-25 21:08:18 +01007679 /* Update our root-domain */
7680 rq = cpu_rq(cpu);
7681 spin_lock_irqsave(&rq->lock, flags);
7682 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10307683 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04007684 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01007685 }
7686 spin_unlock_irqrestore(&rq->lock, flags);
7687 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007688#endif
7689 }
7690 return NOTIFY_OK;
7691}
7692
Paul Mackerrasf38b0822009-06-02 21:05:16 +10007693/*
7694 * Register at high priority so that task migration (migrate_all_tasks)
7695 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007696 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007697 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07007698static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007699 .notifier_call = migration_call,
7700 .priority = 10
7701};
7702
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07007703static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007704{
7705 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07007706 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007707
7708 /* Start one for the boot CPU: */
Akinobu Mita07dccf32006-09-29 02:00:22 -07007709 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
7710 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007711 migration_call(&migration_notifier, CPU_ONLINE, cpu);
7712 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07007713
Thomas Gleixnera004cd42009-07-21 09:54:05 +02007714 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007715}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07007716early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007717#endif
7718
7719#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07007720
Ingo Molnar3e9830d2007-10-15 17:00:13 +02007721#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007722
Mike Travis7c16ec52008-04-04 18:11:11 -07007723static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10307724 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007725{
7726 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07007727 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007728
Rusty Russell968ea6d2008-12-13 21:55:51 +10307729 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10307730 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007731
7732 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
7733
7734 if (!(sd->flags & SD_LOAD_BALANCE)) {
7735 printk("does not load-balance\n");
7736 if (sd->parent)
7737 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
7738 " has parent");
7739 return -1;
7740 }
7741
Li Zefaneefd7962008-11-04 16:15:37 +08007742 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007743
Rusty Russell758b2cd2008-11-25 02:35:04 +10307744 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007745 printk(KERN_ERR "ERROR: domain->span does not contain "
7746 "CPU%d\n", cpu);
7747 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10307748 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007749 printk(KERN_ERR "ERROR: domain->groups does not contain"
7750 " CPU%d\n", cpu);
7751 }
7752
7753 printk(KERN_DEBUG "%*s groups:", level + 1, "");
7754 do {
7755 if (!group) {
7756 printk("\n");
7757 printk(KERN_ERR "ERROR: group is NULL\n");
7758 break;
7759 }
7760
Peter Zijlstra18a38852009-09-01 10:34:39 +02007761 if (!group->cpu_power) {
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007762 printk(KERN_CONT "\n");
7763 printk(KERN_ERR "ERROR: domain->cpu_power not "
7764 "set\n");
7765 break;
7766 }
7767
Rusty Russell758b2cd2008-11-25 02:35:04 +10307768 if (!cpumask_weight(sched_group_cpus(group))) {
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007769 printk(KERN_CONT "\n");
7770 printk(KERN_ERR "ERROR: empty group\n");
7771 break;
7772 }
7773
Rusty Russell758b2cd2008-11-25 02:35:04 +10307774 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007775 printk(KERN_CONT "\n");
7776 printk(KERN_ERR "ERROR: repeated CPUs\n");
7777 break;
7778 }
7779
Rusty Russell758b2cd2008-11-25 02:35:04 +10307780 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007781
Rusty Russell968ea6d2008-12-13 21:55:51 +10307782 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05307783
7784 printk(KERN_CONT " %s", str);
Peter Zijlstra18a38852009-09-01 10:34:39 +02007785 if (group->cpu_power != SCHED_LOAD_SCALE) {
7786 printk(KERN_CONT " (cpu_power = %d)",
7787 group->cpu_power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05307788 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007789
7790 group = group->next;
7791 } while (group != sd->groups);
7792 printk(KERN_CONT "\n");
7793
Rusty Russell758b2cd2008-11-25 02:35:04 +10307794 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007795 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
7796
Rusty Russell758b2cd2008-11-25 02:35:04 +10307797 if (sd->parent &&
7798 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007799 printk(KERN_ERR "ERROR: parent span is not a superset "
7800 "of domain->span\n");
7801 return 0;
7802}
7803
Linus Torvalds1da177e2005-04-16 15:20:36 -07007804static void sched_domain_debug(struct sched_domain *sd, int cpu)
7805{
Rusty Russelld5dd3db2008-11-25 02:35:12 +10307806 cpumask_var_t groupmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007807 int level = 0;
7808
Nick Piggin41c7ce92005-06-25 14:57:24 -07007809 if (!sd) {
7810 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
7811 return;
7812 }
7813
Linus Torvalds1da177e2005-04-16 15:20:36 -07007814 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
7815
Rusty Russelld5dd3db2008-11-25 02:35:12 +10307816 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
Mike Travis7c16ec52008-04-04 18:11:11 -07007817 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
7818 return;
7819 }
7820
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007821 for (;;) {
Mike Travis7c16ec52008-04-04 18:11:11 -07007822 if (sched_domain_debug_one(sd, cpu, level, groupmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007823 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007824 level++;
7825 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08007826 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02007827 break;
7828 }
Rusty Russelld5dd3db2008-11-25 02:35:12 +10307829 free_cpumask_var(groupmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007830}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007831#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07007832# define sched_domain_debug(sd, cpu) do { } while (0)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007833#endif /* CONFIG_SCHED_DEBUG */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007834
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007835static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07007836{
Rusty Russell758b2cd2008-11-25 02:35:04 +10307837 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07007838 return 1;
7839
7840 /* Following flags need at least 2 groups */
7841 if (sd->flags & (SD_LOAD_BALANCE |
7842 SD_BALANCE_NEWIDLE |
7843 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007844 SD_BALANCE_EXEC |
7845 SD_SHARE_CPUPOWER |
7846 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07007847 if (sd->groups != sd->groups->next)
7848 return 0;
7849 }
7850
7851 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007852 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07007853 return 0;
7854
7855 return 1;
7856}
7857
Ingo Molnar48f24c42006-07-03 00:25:40 -07007858static int
7859sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07007860{
7861 unsigned long cflags = sd->flags, pflags = parent->flags;
7862
7863 if (sd_degenerate(parent))
7864 return 1;
7865
Rusty Russell758b2cd2008-11-25 02:35:04 +10307866 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07007867 return 0;
7868
Suresh Siddha245af2c2005-06-25 14:57:25 -07007869 /* Flags needing groups don't count if only 1 group in parent */
7870 if (parent->groups == parent->groups->next) {
7871 pflags &= ~(SD_LOAD_BALANCE |
7872 SD_BALANCE_NEWIDLE |
7873 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007874 SD_BALANCE_EXEC |
7875 SD_SHARE_CPUPOWER |
7876 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08007877 if (nr_node_ids == 1)
7878 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07007879 }
7880 if (~cflags & pflags)
7881 return 0;
7882
7883 return 1;
7884}
7885
Rusty Russellc6c49272008-11-25 02:35:05 +10307886static void free_rootdomain(struct root_domain *rd)
7887{
Rusty Russell68e74562008-11-25 02:35:13 +10307888 cpupri_cleanup(&rd->cpupri);
7889
Rusty Russellc6c49272008-11-25 02:35:05 +10307890 free_cpumask_var(rd->rto_mask);
7891 free_cpumask_var(rd->online);
7892 free_cpumask_var(rd->span);
7893 kfree(rd);
7894}
7895
Gregory Haskins57d885f2008-01-25 21:08:18 +01007896static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7897{
Ingo Molnara0490fa2009-02-12 11:35:40 +01007898 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007899 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007900
7901 spin_lock_irqsave(&rq->lock, flags);
7902
7903 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01007904 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007905
Rusty Russellc6c49272008-11-25 02:35:05 +10307906 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04007907 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01007908
Rusty Russellc6c49272008-11-25 02:35:05 +10307909 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01007910
Ingo Molnara0490fa2009-02-12 11:35:40 +01007911 /*
7912 * If we dont want to free the old_rt yet then
7913 * set old_rd to NULL to skip the freeing later
7914 * in this function:
7915 */
7916 if (!atomic_dec_and_test(&old_rd->refcount))
7917 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007918 }
7919
7920 atomic_inc(&rd->refcount);
7921 rq->rd = rd;
7922
Rusty Russellc6c49272008-11-25 02:35:05 +10307923 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04007924 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04007925 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01007926
7927 spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01007928
7929 if (old_rd)
7930 free_rootdomain(old_rd);
Gregory Haskins57d885f2008-01-25 21:08:18 +01007931}
7932
Li Zefanfd5e1b52009-06-15 13:34:19 +08007933static int init_rootdomain(struct root_domain *rd, bool bootmem)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007934{
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03007935 gfp_t gfp = GFP_KERNEL;
7936
Gregory Haskins57d885f2008-01-25 21:08:18 +01007937 memset(rd, 0, sizeof(*rd));
7938
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03007939 if (bootmem)
7940 gfp = GFP_NOWAIT;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02007941
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03007942 if (!alloc_cpumask_var(&rd->span, gfp))
Li Zefan0c910d22009-01-06 17:39:06 +08007943 goto out;
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03007944 if (!alloc_cpumask_var(&rd->online, gfp))
Rusty Russellc6c49272008-11-25 02:35:05 +10307945 goto free_span;
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03007946 if (!alloc_cpumask_var(&rd->rto_mask, gfp))
Rusty Russellc6c49272008-11-25 02:35:05 +10307947 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02007948
Pekka Enberg0fb53022009-06-11 08:41:22 +03007949 if (cpupri_init(&rd->cpupri, bootmem) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10307950 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10307951 return 0;
7952
Rusty Russell68e74562008-11-25 02:35:13 +10307953free_rto_mask:
7954 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10307955free_online:
7956 free_cpumask_var(rd->online);
7957free_span:
7958 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08007959out:
Rusty Russellc6c49272008-11-25 02:35:05 +10307960 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007961}
7962
7963static void init_defrootdomain(void)
7964{
Rusty Russellc6c49272008-11-25 02:35:05 +10307965 init_rootdomain(&def_root_domain, true);
7966
Gregory Haskins57d885f2008-01-25 21:08:18 +01007967 atomic_set(&def_root_domain.refcount, 1);
7968}
7969
Gregory Haskinsdc938522008-01-25 21:08:26 +01007970static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007971{
7972 struct root_domain *rd;
7973
7974 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
7975 if (!rd)
7976 return NULL;
7977
Rusty Russellc6c49272008-11-25 02:35:05 +10307978 if (init_rootdomain(rd, false) != 0) {
7979 kfree(rd);
7980 return NULL;
7981 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01007982
7983 return rd;
7984}
7985
Linus Torvalds1da177e2005-04-16 15:20:36 -07007986/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01007987 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07007988 * hold the hotplug lock.
7989 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01007990static void
7991cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007992{
Ingo Molnar70b97a72006-07-03 00:25:42 -07007993 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07007994 struct sched_domain *tmp;
7995
7996 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08007997 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07007998 struct sched_domain *parent = tmp->parent;
7999 if (!parent)
8000 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08008001
Siddha, Suresh B1a848872006-10-03 01:14:08 -07008002 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07008003 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07008004 if (parent->parent)
8005 parent->parent->child = tmp;
Li Zefanf29c9b12008-11-06 09:45:16 +08008006 } else
8007 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07008008 }
8009
Siddha, Suresh B1a848872006-10-03 01:14:08 -07008010 if (sd && sd_degenerate(sd)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07008011 sd = sd->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07008012 if (sd)
8013 sd->child = NULL;
8014 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008015
8016 sched_domain_debug(sd, cpu);
8017
Gregory Haskins57d885f2008-01-25 21:08:18 +01008018 rq_attach_root(rq, rd);
Nick Piggin674311d2005-06-25 14:57:27 -07008019 rcu_assign_pointer(rq->sd, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008020}
8021
8022/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10308023static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008024
8025/* Setup the mask of cpus configured for isolated domains */
8026static int __init isolated_cpu_setup(char *str)
8027{
Rusty Russell968ea6d2008-12-13 21:55:51 +10308028 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008029 return 1;
8030}
8031
Ingo Molnar8927f492007-10-15 17:00:13 +02008032__setup("isolcpus=", isolated_cpu_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008033
8034/*
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008035 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
8036 * to a function which identifies what group(along with sched group) a CPU
Rusty Russell96f874e2008-11-25 02:35:14 +10308037 * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
8038 * (due to the fact that we keep track of groups covered with a struct cpumask).
Linus Torvalds1da177e2005-04-16 15:20:36 -07008039 *
8040 * init_sched_build_groups will build a circular linked list of the groups
8041 * covered by the given span, and will set each group's ->cpumask correctly,
8042 * and ->cpu_power to 0.
8043 */
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07008044static void
Rusty Russell96f874e2008-11-25 02:35:14 +10308045init_sched_build_groups(const struct cpumask *span,
8046 const struct cpumask *cpu_map,
8047 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
Mike Travis7c16ec52008-04-04 18:11:11 -07008048 struct sched_group **sg,
Rusty Russell96f874e2008-11-25 02:35:14 +10308049 struct cpumask *tmpmask),
8050 struct cpumask *covered, struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008051{
8052 struct sched_group *first = NULL, *last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008053 int i;
8054
Rusty Russell96f874e2008-11-25 02:35:14 +10308055 cpumask_clear(covered);
Mike Travis7c16ec52008-04-04 18:11:11 -07008056
Rusty Russellabcd0832008-11-25 02:35:02 +10308057 for_each_cpu(i, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008058 struct sched_group *sg;
Mike Travis7c16ec52008-04-04 18:11:11 -07008059 int group = group_fn(i, cpu_map, &sg, tmpmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008060 int j;
8061
Rusty Russell758b2cd2008-11-25 02:35:04 +10308062 if (cpumask_test_cpu(i, covered))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008063 continue;
8064
Rusty Russell758b2cd2008-11-25 02:35:04 +10308065 cpumask_clear(sched_group_cpus(sg));
Peter Zijlstra18a38852009-09-01 10:34:39 +02008066 sg->cpu_power = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008067
Rusty Russellabcd0832008-11-25 02:35:02 +10308068 for_each_cpu(j, span) {
Mike Travis7c16ec52008-04-04 18:11:11 -07008069 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008070 continue;
8071
Rusty Russell96f874e2008-11-25 02:35:14 +10308072 cpumask_set_cpu(j, covered);
Rusty Russell758b2cd2008-11-25 02:35:04 +10308073 cpumask_set_cpu(j, sched_group_cpus(sg));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008074 }
8075 if (!first)
8076 first = sg;
8077 if (last)
8078 last->next = sg;
8079 last = sg;
8080 }
8081 last->next = first;
8082}
8083
John Hawkes9c1cfda2005-09-06 15:18:14 -07008084#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07008085
John Hawkes9c1cfda2005-09-06 15:18:14 -07008086#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08008087
John Hawkes9c1cfda2005-09-06 15:18:14 -07008088/**
8089 * find_next_best_node - find the next node to include in a sched_domain
8090 * @node: node whose sched_domain we're building
8091 * @used_nodes: nodes already in the sched_domain
8092 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008093 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07008094 * finds the closest node not already in the @used_nodes map.
8095 *
8096 * Should use nodemask_t.
8097 */
Mike Travisc5f59f02008-04-04 18:11:10 -07008098static int find_next_best_node(int node, nodemask_t *used_nodes)
John Hawkes9c1cfda2005-09-06 15:18:14 -07008099{
8100 int i, n, val, min_val, best_node = 0;
8101
8102 min_val = INT_MAX;
8103
Mike Travis076ac2a2008-05-12 21:21:12 +02008104 for (i = 0; i < nr_node_ids; i++) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07008105 /* Start at @node */
Mike Travis076ac2a2008-05-12 21:21:12 +02008106 n = (node + i) % nr_node_ids;
John Hawkes9c1cfda2005-09-06 15:18:14 -07008107
8108 if (!nr_cpus_node(n))
8109 continue;
8110
8111 /* Skip already used nodes */
Mike Travisc5f59f02008-04-04 18:11:10 -07008112 if (node_isset(n, *used_nodes))
John Hawkes9c1cfda2005-09-06 15:18:14 -07008113 continue;
8114
8115 /* Simple min distance search */
8116 val = node_distance(node, n);
8117
8118 if (val < min_val) {
8119 min_val = val;
8120 best_node = n;
8121 }
8122 }
8123
Mike Travisc5f59f02008-04-04 18:11:10 -07008124 node_set(best_node, *used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07008125 return best_node;
8126}
8127
8128/**
8129 * sched_domain_node_span - get a cpumask for a node's sched_domain
8130 * @node: node whose cpumask we're constructing
Randy Dunlap73486722008-04-22 10:07:22 -07008131 * @span: resulting cpumask
John Hawkes9c1cfda2005-09-06 15:18:14 -07008132 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008133 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07008134 * should be one that prevents unnecessary balancing, but also spreads tasks
8135 * out optimally.
8136 */
Rusty Russell96f874e2008-11-25 02:35:14 +10308137static void sched_domain_node_span(int node, struct cpumask *span)
John Hawkes9c1cfda2005-09-06 15:18:14 -07008138{
Mike Travisc5f59f02008-04-04 18:11:10 -07008139 nodemask_t used_nodes;
Ingo Molnar48f24c42006-07-03 00:25:40 -07008140 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07008141
Mike Travis6ca09df2008-12-31 18:08:45 -08008142 cpumask_clear(span);
Mike Travisc5f59f02008-04-04 18:11:10 -07008143 nodes_clear(used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07008144
Mike Travis6ca09df2008-12-31 18:08:45 -08008145 cpumask_or(span, span, cpumask_of_node(node));
Mike Travisc5f59f02008-04-04 18:11:10 -07008146 node_set(node, used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07008147
8148 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
Mike Travisc5f59f02008-04-04 18:11:10 -07008149 int next_node = find_next_best_node(node, &used_nodes);
Ingo Molnar48f24c42006-07-03 00:25:40 -07008150
Mike Travis6ca09df2008-12-31 18:08:45 -08008151 cpumask_or(span, span, cpumask_of_node(next_node));
John Hawkes9c1cfda2005-09-06 15:18:14 -07008152 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07008153}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008154#endif /* CONFIG_NUMA */
John Hawkes9c1cfda2005-09-06 15:18:14 -07008155
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07008156int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07008157
John Hawkes9c1cfda2005-09-06 15:18:14 -07008158/*
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308159 * The cpus mask in sched_group and sched_domain hangs off the end.
Ingo Molnar4200efd2009-05-19 09:22:19 +02008160 *
8161 * ( See the the comments in include/linux/sched.h:struct sched_group
8162 * and struct sched_domain. )
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308163 */
8164struct static_sched_group {
8165 struct sched_group sg;
8166 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
8167};
8168
8169struct static_sched_domain {
8170 struct sched_domain sd;
8171 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
8172};
8173
Andreas Herrmann49a02c52009-08-18 12:51:52 +02008174struct s_data {
8175#ifdef CONFIG_NUMA
8176 int sd_allnodes;
8177 cpumask_var_t domainspan;
8178 cpumask_var_t covered;
8179 cpumask_var_t notcovered;
8180#endif
8181 cpumask_var_t nodemask;
8182 cpumask_var_t this_sibling_map;
8183 cpumask_var_t this_core_map;
8184 cpumask_var_t send_covered;
8185 cpumask_var_t tmpmask;
8186 struct sched_group **sched_group_nodes;
8187 struct root_domain *rd;
8188};
8189
Andreas Herrmann2109b992009-08-18 12:53:00 +02008190enum s_alloc {
8191 sa_sched_groups = 0,
8192 sa_rootdomain,
8193 sa_tmpmask,
8194 sa_send_covered,
8195 sa_this_core_map,
8196 sa_this_sibling_map,
8197 sa_nodemask,
8198 sa_sched_group_nodes,
8199#ifdef CONFIG_NUMA
8200 sa_notcovered,
8201 sa_covered,
8202 sa_domainspan,
8203#endif
8204 sa_none,
8205};
8206
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308207/*
Ingo Molnar48f24c42006-07-03 00:25:40 -07008208 * SMT sched-domains:
John Hawkes9c1cfda2005-09-06 15:18:14 -07008209 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008210#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308211static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
8212static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
Ingo Molnar48f24c42006-07-03 00:25:40 -07008213
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008214static int
Rusty Russell96f874e2008-11-25 02:35:14 +10308215cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
8216 struct sched_group **sg, struct cpumask *unused)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008217{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008218 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308219 *sg = &per_cpu(sched_group_cpus, cpu).sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008220 return cpu;
8221}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008222#endif /* CONFIG_SCHED_SMT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008223
Ingo Molnar48f24c42006-07-03 00:25:40 -07008224/*
8225 * multi-core sched-domains:
8226 */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008227#ifdef CONFIG_SCHED_MC
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308228static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
8229static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008230#endif /* CONFIG_SCHED_MC */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008231
8232#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008233static int
Rusty Russell96f874e2008-11-25 02:35:14 +10308234cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
8235 struct sched_group **sg, struct cpumask *mask)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008236{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008237 int group;
Mike Travis7c16ec52008-04-04 18:11:11 -07008238
Rusty Russellc69fc562009-03-13 14:49:46 +10308239 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10308240 group = cpumask_first(mask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008241 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308242 *sg = &per_cpu(sched_group_core, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008243 return group;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008244}
8245#elif defined(CONFIG_SCHED_MC)
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008246static int
Rusty Russell96f874e2008-11-25 02:35:14 +10308247cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
8248 struct sched_group **sg, struct cpumask *unused)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008249{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008250 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308251 *sg = &per_cpu(sched_group_core, cpu).sg;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008252 return cpu;
8253}
8254#endif
8255
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308256static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
8257static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
Ingo Molnar48f24c42006-07-03 00:25:40 -07008258
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008259static int
Rusty Russell96f874e2008-11-25 02:35:14 +10308260cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
8261 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008262{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008263 int group;
Ingo Molnar48f24c42006-07-03 00:25:40 -07008264#ifdef CONFIG_SCHED_MC
Mike Travis6ca09df2008-12-31 18:08:45 -08008265 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10308266 group = cpumask_first(mask);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008267#elif defined(CONFIG_SCHED_SMT)
Rusty Russellc69fc562009-03-13 14:49:46 +10308268 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10308269 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008270#else
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008271 group = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008272#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008273 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308274 *sg = &per_cpu(sched_group_phys, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008275 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008276}
8277
8278#ifdef CONFIG_NUMA
John Hawkes9c1cfda2005-09-06 15:18:14 -07008279/*
8280 * The init_sched_build_groups can't handle what we want to do with node
8281 * groups, so roll our own. Now each node has its own list of groups which
8282 * gets dynamically allocated.
8283 */
Rusty Russell62ea9ce2009-01-11 01:04:16 +01008284static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
Mike Travis434d53b2008-04-04 18:11:04 -07008285static struct sched_group ***sched_group_nodes_bycpu;
John Hawkes9c1cfda2005-09-06 15:18:14 -07008286
Rusty Russell62ea9ce2009-01-11 01:04:16 +01008287static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308288static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07008289
Rusty Russell96f874e2008-11-25 02:35:14 +10308290static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
8291 struct sched_group **sg,
8292 struct cpumask *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008293{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008294 int group;
8295
Mike Travis6ca09df2008-12-31 18:08:45 -08008296 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10308297 group = cpumask_first(nodemask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008298
8299 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308300 *sg = &per_cpu(sched_group_allnodes, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008301 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008302}
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008303
Siddha, Suresh B08069032006-03-27 01:15:23 -08008304static void init_numa_sched_groups_power(struct sched_group *group_head)
8305{
8306 struct sched_group *sg = group_head;
8307 int j;
8308
8309 if (!sg)
8310 return;
Andi Kleen3a5c3592007-10-15 17:00:14 +02008311 do {
Rusty Russell758b2cd2008-11-25 02:35:04 +10308312 for_each_cpu(j, sched_group_cpus(sg)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02008313 struct sched_domain *sd;
Siddha, Suresh B08069032006-03-27 01:15:23 -08008314
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308315 sd = &per_cpu(phys_domains, j).sd;
Miao Xie13318a72009-04-15 09:59:10 +08008316 if (j != group_first_cpu(sd->groups)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02008317 /*
8318 * Only add "power" once for each
8319 * physical package.
8320 */
8321 continue;
8322 }
8323
Peter Zijlstra18a38852009-09-01 10:34:39 +02008324 sg->cpu_power += sd->groups->cpu_power;
Siddha, Suresh B08069032006-03-27 01:15:23 -08008325 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02008326 sg = sg->next;
8327 } while (sg != group_head);
Siddha, Suresh B08069032006-03-27 01:15:23 -08008328}
Andreas Herrmann0601a882009-08-18 13:01:11 +02008329
8330static int build_numa_sched_groups(struct s_data *d,
8331 const struct cpumask *cpu_map, int num)
8332{
8333 struct sched_domain *sd;
8334 struct sched_group *sg, *prev;
8335 int n, j;
8336
8337 cpumask_clear(d->covered);
8338 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
8339 if (cpumask_empty(d->nodemask)) {
8340 d->sched_group_nodes[num] = NULL;
8341 goto out;
8342 }
8343
8344 sched_domain_node_span(num, d->domainspan);
8345 cpumask_and(d->domainspan, d->domainspan, cpu_map);
8346
8347 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
8348 GFP_KERNEL, num);
8349 if (!sg) {
8350 printk(KERN_WARNING "Can not alloc domain group for node %d\n",
8351 num);
8352 return -ENOMEM;
8353 }
8354 d->sched_group_nodes[num] = sg;
8355
8356 for_each_cpu(j, d->nodemask) {
8357 sd = &per_cpu(node_domains, j).sd;
8358 sd->groups = sg;
8359 }
8360
Peter Zijlstra18a38852009-09-01 10:34:39 +02008361 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02008362 cpumask_copy(sched_group_cpus(sg), d->nodemask);
8363 sg->next = sg;
8364 cpumask_or(d->covered, d->covered, d->nodemask);
8365
8366 prev = sg;
8367 for (j = 0; j < nr_node_ids; j++) {
8368 n = (num + j) % nr_node_ids;
8369 cpumask_complement(d->notcovered, d->covered);
8370 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
8371 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
8372 if (cpumask_empty(d->tmpmask))
8373 break;
8374 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
8375 if (cpumask_empty(d->tmpmask))
8376 continue;
8377 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
8378 GFP_KERNEL, num);
8379 if (!sg) {
8380 printk(KERN_WARNING
8381 "Can not alloc domain group for node %d\n", j);
8382 return -ENOMEM;
8383 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02008384 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02008385 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
8386 sg->next = prev->next;
8387 cpumask_or(d->covered, d->covered, d->tmpmask);
8388 prev->next = sg;
8389 prev = sg;
8390 }
8391out:
8392 return 0;
8393}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008394#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008395
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07008396#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008397/* Free memory allocated for various sched_group structures */
Rusty Russell96f874e2008-11-25 02:35:14 +10308398static void free_sched_groups(const struct cpumask *cpu_map,
8399 struct cpumask *nodemask)
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008400{
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07008401 int cpu, i;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008402
Rusty Russellabcd0832008-11-25 02:35:02 +10308403 for_each_cpu(cpu, cpu_map) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008404 struct sched_group **sched_group_nodes
8405 = sched_group_nodes_bycpu[cpu];
8406
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008407 if (!sched_group_nodes)
8408 continue;
8409
Mike Travis076ac2a2008-05-12 21:21:12 +02008410 for (i = 0; i < nr_node_ids; i++) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008411 struct sched_group *oldsg, *sg = sched_group_nodes[i];
8412
Mike Travis6ca09df2008-12-31 18:08:45 -08008413 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10308414 if (cpumask_empty(nodemask))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008415 continue;
8416
8417 if (sg == NULL)
8418 continue;
8419 sg = sg->next;
8420next_sg:
8421 oldsg = sg;
8422 sg = sg->next;
8423 kfree(oldsg);
8424 if (oldsg != sched_group_nodes[i])
8425 goto next_sg;
8426 }
8427 kfree(sched_group_nodes);
8428 sched_group_nodes_bycpu[cpu] = NULL;
8429 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008430}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008431#else /* !CONFIG_NUMA */
Rusty Russell96f874e2008-11-25 02:35:14 +10308432static void free_sched_groups(const struct cpumask *cpu_map,
8433 struct cpumask *nodemask)
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07008434{
8435}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008436#endif /* CONFIG_NUMA */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008437
Linus Torvalds1da177e2005-04-16 15:20:36 -07008438/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008439 * Initialize sched groups cpu_power.
8440 *
8441 * cpu_power indicates the capacity of sched group, which is used while
8442 * distributing the load between different sched groups in a sched domain.
8443 * Typically cpu_power for all the groups in a sched domain will be same unless
8444 * there are asymmetries in the topology. If there are asymmetries, group
8445 * having more cpu_power will pickup more load compared to the group having
8446 * less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008447 */
8448static void init_sched_groups_power(int cpu, struct sched_domain *sd)
8449{
8450 struct sched_domain *child;
8451 struct sched_group *group;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02008452 long power;
8453 int weight;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008454
8455 WARN_ON(!sd || !sd->groups);
8456
Miao Xie13318a72009-04-15 09:59:10 +08008457 if (cpu != group_first_cpu(sd->groups))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008458 return;
8459
8460 child = sd->child;
8461
Peter Zijlstra18a38852009-09-01 10:34:39 +02008462 sd->groups->cpu_power = 0;
Eric Dumazet5517d862007-05-08 00:32:57 -07008463
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02008464 if (!child) {
8465 power = SCHED_LOAD_SCALE;
8466 weight = cpumask_weight(sched_domain_span(sd));
8467 /*
8468 * SMT siblings share the power of a single core.
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02008469 * Usually multiple threads get a better yield out of
8470 * that one core than a single thread would have,
8471 * reflect that in sd->smt_gain.
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02008472 */
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02008473 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
8474 power *= sd->smt_gain;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02008475 power /= weight;
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02008476 power >>= SCHED_LOAD_SHIFT;
8477 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02008478 sd->groups->cpu_power += power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008479 return;
8480 }
8481
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008482 /*
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02008483 * Add cpu_power of each child group to this groups cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008484 */
8485 group = child->groups;
8486 do {
Peter Zijlstra18a38852009-09-01 10:34:39 +02008487 sd->groups->cpu_power += group->cpu_power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008488 group = group->next;
8489 } while (group != child->groups);
8490}
8491
8492/*
Mike Travis7c16ec52008-04-04 18:11:11 -07008493 * Initializers for schedule domains
8494 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
8495 */
8496
Ingo Molnara5d8c342008-10-09 11:35:51 +02008497#ifdef CONFIG_SCHED_DEBUG
8498# define SD_INIT_NAME(sd, type) sd->name = #type
8499#else
8500# define SD_INIT_NAME(sd, type) do { } while (0)
8501#endif
8502
Mike Travis7c16ec52008-04-04 18:11:11 -07008503#define SD_INIT(sd, type) sd_init_##type(sd)
Ingo Molnara5d8c342008-10-09 11:35:51 +02008504
Mike Travis7c16ec52008-04-04 18:11:11 -07008505#define SD_INIT_FUNC(type) \
8506static noinline void sd_init_##type(struct sched_domain *sd) \
8507{ \
8508 memset(sd, 0, sizeof(*sd)); \
8509 *sd = SD_##type##_INIT; \
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008510 sd->level = SD_LV_##type; \
Ingo Molnara5d8c342008-10-09 11:35:51 +02008511 SD_INIT_NAME(sd, type); \
Mike Travis7c16ec52008-04-04 18:11:11 -07008512}
8513
8514SD_INIT_FUNC(CPU)
8515#ifdef CONFIG_NUMA
8516 SD_INIT_FUNC(ALLNODES)
8517 SD_INIT_FUNC(NODE)
8518#endif
8519#ifdef CONFIG_SCHED_SMT
8520 SD_INIT_FUNC(SIBLING)
8521#endif
8522#ifdef CONFIG_SCHED_MC
8523 SD_INIT_FUNC(MC)
8524#endif
8525
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008526static int default_relax_domain_level = -1;
8527
8528static int __init setup_relax_domain_level(char *str)
8529{
Li Zefan30e0e172008-05-13 10:27:17 +08008530 unsigned long val;
8531
8532 val = simple_strtoul(str, NULL, 0);
8533 if (val < SD_LV_MAX)
8534 default_relax_domain_level = val;
8535
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008536 return 1;
8537}
8538__setup("relax_domain_level=", setup_relax_domain_level);
8539
8540static void set_domain_attribute(struct sched_domain *sd,
8541 struct sched_domain_attr *attr)
8542{
8543 int request;
8544
8545 if (!attr || attr->relax_domain_level < 0) {
8546 if (default_relax_domain_level < 0)
8547 return;
8548 else
8549 request = default_relax_domain_level;
8550 } else
8551 request = attr->relax_domain_level;
8552 if (request < sd->level) {
8553 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02008554 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008555 } else {
8556 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02008557 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008558 }
8559}
8560
Andreas Herrmann2109b992009-08-18 12:53:00 +02008561static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
8562 const struct cpumask *cpu_map)
8563{
8564 switch (what) {
8565 case sa_sched_groups:
8566 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
8567 d->sched_group_nodes = NULL;
8568 case sa_rootdomain:
8569 free_rootdomain(d->rd); /* fall through */
8570 case sa_tmpmask:
8571 free_cpumask_var(d->tmpmask); /* fall through */
8572 case sa_send_covered:
8573 free_cpumask_var(d->send_covered); /* fall through */
8574 case sa_this_core_map:
8575 free_cpumask_var(d->this_core_map); /* fall through */
8576 case sa_this_sibling_map:
8577 free_cpumask_var(d->this_sibling_map); /* fall through */
8578 case sa_nodemask:
8579 free_cpumask_var(d->nodemask); /* fall through */
8580 case sa_sched_group_nodes:
8581#ifdef CONFIG_NUMA
8582 kfree(d->sched_group_nodes); /* fall through */
8583 case sa_notcovered:
8584 free_cpumask_var(d->notcovered); /* fall through */
8585 case sa_covered:
8586 free_cpumask_var(d->covered); /* fall through */
8587 case sa_domainspan:
8588 free_cpumask_var(d->domainspan); /* fall through */
8589#endif
8590 case sa_none:
8591 break;
8592 }
8593}
8594
8595static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
8596 const struct cpumask *cpu_map)
8597{
8598#ifdef CONFIG_NUMA
8599 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
8600 return sa_none;
8601 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
8602 return sa_domainspan;
8603 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
8604 return sa_covered;
8605 /* Allocate the per-node list of sched groups */
8606 d->sched_group_nodes = kcalloc(nr_node_ids,
8607 sizeof(struct sched_group *), GFP_KERNEL);
8608 if (!d->sched_group_nodes) {
8609 printk(KERN_WARNING "Can not alloc sched group node list\n");
8610 return sa_notcovered;
8611 }
8612 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
8613#endif
8614 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
8615 return sa_sched_group_nodes;
8616 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
8617 return sa_nodemask;
8618 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
8619 return sa_this_sibling_map;
8620 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
8621 return sa_this_core_map;
8622 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
8623 return sa_send_covered;
8624 d->rd = alloc_rootdomain();
8625 if (!d->rd) {
8626 printk(KERN_WARNING "Cannot alloc root domain\n");
8627 return sa_tmpmask;
8628 }
8629 return sa_rootdomain;
8630}
8631
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02008632static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
8633 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
8634{
8635 struct sched_domain *sd = NULL;
8636#ifdef CONFIG_NUMA
8637 struct sched_domain *parent;
8638
8639 d->sd_allnodes = 0;
8640 if (cpumask_weight(cpu_map) >
8641 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
8642 sd = &per_cpu(allnodes_domains, i).sd;
8643 SD_INIT(sd, ALLNODES);
8644 set_domain_attribute(sd, attr);
8645 cpumask_copy(sched_domain_span(sd), cpu_map);
8646 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
8647 d->sd_allnodes = 1;
8648 }
8649 parent = sd;
8650
8651 sd = &per_cpu(node_domains, i).sd;
8652 SD_INIT(sd, NODE);
8653 set_domain_attribute(sd, attr);
8654 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
8655 sd->parent = parent;
8656 if (parent)
8657 parent->child = sd;
8658 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
8659#endif
8660 return sd;
8661}
8662
Andreas Herrmann87cce662009-08-18 12:54:55 +02008663static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
8664 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
8665 struct sched_domain *parent, int i)
8666{
8667 struct sched_domain *sd;
8668 sd = &per_cpu(phys_domains, i).sd;
8669 SD_INIT(sd, CPU);
8670 set_domain_attribute(sd, attr);
8671 cpumask_copy(sched_domain_span(sd), d->nodemask);
8672 sd->parent = parent;
8673 if (parent)
8674 parent->child = sd;
8675 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
8676 return sd;
8677}
8678
Andreas Herrmann410c4082009-08-18 12:56:14 +02008679static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
8680 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
8681 struct sched_domain *parent, int i)
8682{
8683 struct sched_domain *sd = parent;
8684#ifdef CONFIG_SCHED_MC
8685 sd = &per_cpu(core_domains, i).sd;
8686 SD_INIT(sd, MC);
8687 set_domain_attribute(sd, attr);
8688 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
8689 sd->parent = parent;
8690 parent->child = sd;
8691 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
8692#endif
8693 return sd;
8694}
8695
Andreas Herrmannd8173532009-08-18 12:57:03 +02008696static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
8697 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
8698 struct sched_domain *parent, int i)
8699{
8700 struct sched_domain *sd = parent;
8701#ifdef CONFIG_SCHED_SMT
8702 sd = &per_cpu(cpu_domains, i).sd;
8703 SD_INIT(sd, SIBLING);
8704 set_domain_attribute(sd, attr);
8705 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
8706 sd->parent = parent;
8707 parent->child = sd;
8708 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
8709#endif
8710 return sd;
8711}
8712
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02008713static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
8714 const struct cpumask *cpu_map, int cpu)
8715{
8716 switch (l) {
8717#ifdef CONFIG_SCHED_SMT
8718 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
8719 cpumask_and(d->this_sibling_map, cpu_map,
8720 topology_thread_cpumask(cpu));
8721 if (cpu == cpumask_first(d->this_sibling_map))
8722 init_sched_build_groups(d->this_sibling_map, cpu_map,
8723 &cpu_to_cpu_group,
8724 d->send_covered, d->tmpmask);
8725 break;
8726#endif
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02008727#ifdef CONFIG_SCHED_MC
8728 case SD_LV_MC: /* set up multi-core groups */
8729 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
8730 if (cpu == cpumask_first(d->this_core_map))
8731 init_sched_build_groups(d->this_core_map, cpu_map,
8732 &cpu_to_core_group,
8733 d->send_covered, d->tmpmask);
8734 break;
8735#endif
Andreas Herrmann86548092009-08-18 12:59:28 +02008736 case SD_LV_CPU: /* set up physical groups */
8737 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
8738 if (!cpumask_empty(d->nodemask))
8739 init_sched_build_groups(d->nodemask, cpu_map,
8740 &cpu_to_phys_group,
8741 d->send_covered, d->tmpmask);
8742 break;
Andreas Herrmannde616e32009-08-18 13:00:13 +02008743#ifdef CONFIG_NUMA
8744 case SD_LV_ALLNODES:
8745 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
8746 d->send_covered, d->tmpmask);
8747 break;
8748#endif
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02008749 default:
8750 break;
8751 }
8752}
8753
Mike Travis7c16ec52008-04-04 18:11:11 -07008754/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008755 * Build sched domains for a given set of cpus and attach the sched domains
8756 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07008757 */
Rusty Russell96f874e2008-11-25 02:35:14 +10308758static int __build_sched_domains(const struct cpumask *cpu_map,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008759 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008760{
Andreas Herrmann2109b992009-08-18 12:53:00 +02008761 enum s_alloc alloc_state = sa_none;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02008762 struct s_data d;
Andreas Herrmann294b0c92009-08-18 13:02:29 +02008763 struct sched_domain *sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02008764 int i;
John Hawkesd1b55132005-09-06 15:18:14 -07008765#ifdef CONFIG_NUMA
Andreas Herrmann49a02c52009-08-18 12:51:52 +02008766 d.sd_allnodes = 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10308767#endif
8768
Andreas Herrmann2109b992009-08-18 12:53:00 +02008769 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
8770 if (alloc_state != sa_rootdomain)
8771 goto error;
8772 alloc_state = sa_sched_groups;
Mike Travis7c16ec52008-04-04 18:11:11 -07008773
Linus Torvalds1da177e2005-04-16 15:20:36 -07008774 /*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008775 * Set up domains for cpus specified by the cpu_map.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008776 */
Rusty Russellabcd0832008-11-25 02:35:02 +10308777 for_each_cpu(i, cpu_map) {
Andreas Herrmann49a02c52009-08-18 12:51:52 +02008778 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
8779 cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008780
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02008781 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
Andreas Herrmann87cce662009-08-18 12:54:55 +02008782 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmann410c4082009-08-18 12:56:14 +02008783 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmannd8173532009-08-18 12:57:03 +02008784 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008785 }
8786
Rusty Russellabcd0832008-11-25 02:35:02 +10308787 for_each_cpu(i, cpu_map) {
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02008788 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02008789 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008790 }
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008791
Linus Torvalds1da177e2005-04-16 15:20:36 -07008792 /* Set up physical groups */
Andreas Herrmann86548092009-08-18 12:59:28 +02008793 for (i = 0; i < nr_node_ids; i++)
8794 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008795
8796#ifdef CONFIG_NUMA
8797 /* Set up node groups */
Andreas Herrmannde616e32009-08-18 13:00:13 +02008798 if (d.sd_allnodes)
8799 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
John Hawkes9c1cfda2005-09-06 15:18:14 -07008800
Andreas Herrmann0601a882009-08-18 13:01:11 +02008801 for (i = 0; i < nr_node_ids; i++)
8802 if (build_numa_sched_groups(&d, cpu_map, i))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008803 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008804#endif
8805
8806 /* Calculate CPU power for physical packages and nodes */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07008807#ifdef CONFIG_SCHED_SMT
Rusty Russellabcd0832008-11-25 02:35:02 +10308808 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02008809 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008810 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07008811 }
8812#endif
8813#ifdef CONFIG_SCHED_MC
Rusty Russellabcd0832008-11-25 02:35:02 +10308814 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02008815 sd = &per_cpu(core_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008816 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07008817 }
8818#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008819
Rusty Russellabcd0832008-11-25 02:35:02 +10308820 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02008821 sd = &per_cpu(phys_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07008822 init_sched_groups_power(i, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008823 }
8824
John Hawkes9c1cfda2005-09-06 15:18:14 -07008825#ifdef CONFIG_NUMA
Mike Travis076ac2a2008-05-12 21:21:12 +02008826 for (i = 0; i < nr_node_ids; i++)
Andreas Herrmann49a02c52009-08-18 12:51:52 +02008827 init_numa_sched_groups_power(d.sched_group_nodes[i]);
John Hawkes9c1cfda2005-09-06 15:18:14 -07008828
Andreas Herrmann49a02c52009-08-18 12:51:52 +02008829 if (d.sd_allnodes) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08008830 struct sched_group *sg;
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07008831
Rusty Russell96f874e2008-11-25 02:35:14 +10308832 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
Andreas Herrmann49a02c52009-08-18 12:51:52 +02008833 d.tmpmask);
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07008834 init_numa_sched_groups_power(sg);
8835 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07008836#endif
8837
Linus Torvalds1da177e2005-04-16 15:20:36 -07008838 /* Attach the domains */
Rusty Russellabcd0832008-11-25 02:35:02 +10308839 for_each_cpu(i, cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008840#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308841 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08008842#elif defined(CONFIG_SCHED_MC)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308843 sd = &per_cpu(core_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008844#else
Rusty Russell6c99e9a2008-11-25 02:35:04 +10308845 sd = &per_cpu(phys_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008846#endif
Andreas Herrmann49a02c52009-08-18 12:51:52 +02008847 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008848 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008849
Andreas Herrmann2109b992009-08-18 12:53:00 +02008850 d.sched_group_nodes = NULL; /* don't free this we still need it */
8851 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
8852 return 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10308853
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07008854error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02008855 __free_domain_allocs(&d, alloc_state, cpu_map);
8856 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008857}
Paul Jackson029190c2007-10-18 23:40:20 -07008858
Rusty Russell96f874e2008-11-25 02:35:14 +10308859static int build_sched_domains(const struct cpumask *cpu_map)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008860{
8861 return __build_sched_domains(cpu_map, NULL);
8862}
8863
Rusty Russellacc3f5d2009-11-03 14:53:40 +10308864static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07008865static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02008866static struct sched_domain_attr *dattr_cur;
8867 /* attribues of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07008868
8869/*
8870 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10308871 * cpumask) fails, then fallback to a single sched domain,
8872 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07008873 */
Rusty Russell42128232008-11-25 02:35:12 +10308874static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07008875
Heiko Carstensee79d1b2008-12-09 18:49:50 +01008876/*
8877 * arch_update_cpu_topology lets virtualized architectures update the
8878 * cpu core maps. It is supposed to return 1 if the topology changed
8879 * or 0 if it stayed the same.
8880 */
8881int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01008882{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01008883 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01008884}
8885
Rusty Russellacc3f5d2009-11-03 14:53:40 +10308886cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
8887{
8888 int i;
8889 cpumask_var_t *doms;
8890
8891 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
8892 if (!doms)
8893 return NULL;
8894 for (i = 0; i < ndoms; i++) {
8895 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
8896 free_sched_domains(doms, i);
8897 return NULL;
8898 }
8899 }
8900 return doms;
8901}
8902
8903void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
8904{
8905 unsigned int i;
8906 for (i = 0; i < ndoms; i++)
8907 free_cpumask_var(doms[i]);
8908 kfree(doms);
8909}
8910
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008911/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008912 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07008913 * For now this just excludes isolated cpus, but could be used to
8914 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008915 */
Rusty Russell96f874e2008-11-25 02:35:14 +10308916static int arch_init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008917{
Milton Miller73785472007-10-24 18:23:48 +02008918 int err;
8919
Heiko Carstens22e52b02008-03-12 18:31:59 +01008920 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07008921 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10308922 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07008923 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10308924 doms_cur = &fallback_doms;
8925 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008926 dattr_cur = NULL;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10308927 err = build_sched_domains(doms_cur[0]);
Milton Miller6382bc92007-10-15 17:00:19 +02008928 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02008929
8930 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008931}
8932
Rusty Russell96f874e2008-11-25 02:35:14 +10308933static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
8934 struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008935{
Mike Travis7c16ec52008-04-04 18:11:11 -07008936 free_sched_groups(cpu_map, tmpmask);
John Hawkes9c1cfda2005-09-06 15:18:14 -07008937}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008938
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008939/*
8940 * Detach sched domains from a group of cpus specified in cpu_map
8941 * These cpus will now be attached to the NULL domain
8942 */
Rusty Russell96f874e2008-11-25 02:35:14 +10308943static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008944{
Rusty Russell96f874e2008-11-25 02:35:14 +10308945 /* Save because hotplug lock held. */
8946 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008947 int i;
8948
Rusty Russellabcd0832008-11-25 02:35:02 +10308949 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01008950 cpu_attach_domain(NULL, &def_root_domain, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008951 synchronize_sched();
Rusty Russell96f874e2008-11-25 02:35:14 +10308952 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07008953}
8954
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008955/* handle null as "default" */
8956static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
8957 struct sched_domain_attr *new, int idx_new)
8958{
8959 struct sched_domain_attr tmp;
8960
8961 /* fast path */
8962 if (!new && !cur)
8963 return 1;
8964
8965 tmp = SD_ATTR_INIT;
8966 return !memcmp(cur ? (cur + idx_cur) : &tmp,
8967 new ? (new + idx_new) : &tmp,
8968 sizeof(struct sched_domain_attr));
8969}
8970
Paul Jackson029190c2007-10-18 23:40:20 -07008971/*
8972 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008973 * cpumasks in the array doms_new[] of cpumasks. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07008974 * doms_new[] to the current sched domain partitioning, doms_cur[].
8975 * It destroys each deleted domain and builds each new domain.
8976 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10308977 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008978 * The masks don't intersect (don't overlap.) We should setup one
8979 * sched domain for each mask. CPUs not in any of the cpumasks will
8980 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07008981 * current 'doms_cur' domains and in the new 'doms_new', we can leave
8982 * it as it is.
8983 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10308984 * The passed in 'doms_new' should be allocated using
8985 * alloc_sched_domains. This routine takes ownership of it and will
8986 * free_sched_domains it when done with it. If the caller failed the
8987 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
8988 * and partition_sched_domains() will fallback to the single partition
8989 * 'fallback_doms', it also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07008990 *
Rusty Russell96f874e2008-11-25 02:35:14 +10308991 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08008992 * ndoms_new == 0 is a special case for destroying existing domains,
8993 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07008994 *
Paul Jackson029190c2007-10-18 23:40:20 -07008995 * Call with hotplug lock held
8996 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10308997void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09008998 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07008999{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07009000 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01009001 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07009002
Heiko Carstens712555e2008-04-28 11:33:07 +02009003 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01009004
Milton Miller73785472007-10-24 18:23:48 +02009005 /* always unregister in case we don't destroy any domains */
9006 unregister_sched_domain_sysctl();
9007
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01009008 /* Let architecture update cpu core mappings. */
9009 new_topology = arch_update_cpu_topology();
9010
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07009011 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07009012
9013 /* Destroy deleted domains */
9014 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01009015 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10309016 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09009017 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07009018 goto match1;
9019 }
9020 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10309021 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07009022match1:
9023 ;
9024 }
9025
Max Krasnyanskye761b772008-07-15 04:43:49 -07009026 if (doms_new == NULL) {
9027 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10309028 doms_new = &fallback_doms;
9029 cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08009030 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07009031 }
9032
Paul Jackson029190c2007-10-18 23:40:20 -07009033 /* Build new domains */
9034 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01009035 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10309036 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09009037 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07009038 goto match2;
9039 }
9040 /* no match - add a new doms_new */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10309041 __build_sched_domains(doms_new[i],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09009042 dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07009043match2:
9044 ;
9045 }
9046
9047 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10309048 if (doms_cur != &fallback_doms)
9049 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09009050 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07009051 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09009052 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07009053 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02009054
9055 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01009056
Heiko Carstens712555e2008-04-28 11:33:07 +02009057 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07009058}
9059
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009060#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Li Zefanc70f22d2009-01-05 19:07:50 +08009061static void arch_reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009062{
Gautham R Shenoy95402b32008-01-25 21:08:02 +01009063 get_online_cpus();
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07009064
9065 /* Destroy domains first to force the rebuild */
9066 partition_sched_domains(0, NULL, NULL);
9067
Max Krasnyanskye761b772008-07-15 04:43:49 -07009068 rebuild_sched_domains();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01009069 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009070}
9071
9072static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
9073{
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05309074 unsigned int level = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009075
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05309076 if (sscanf(buf, "%u", &level) != 1)
9077 return -EINVAL;
9078
9079 /*
9080 * level is always be positive so don't check for
9081 * level < POWERSAVINGS_BALANCE_NONE which is 0
9082 * What happens on 0 or 1 byte write,
9083 * need to check for count as well?
9084 */
9085
9086 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009087 return -EINVAL;
9088
9089 if (smt)
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05309090 sched_smt_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009091 else
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05309092 sched_mc_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009093
Li Zefanc70f22d2009-01-05 19:07:50 +08009094 arch_reinit_sched_domains();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009095
Li Zefanc70f22d2009-01-05 19:07:50 +08009096 return count;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009097}
9098
Adrian Bunk6707de002007-08-12 18:08:19 +02009099#ifdef CONFIG_SCHED_MC
Andi Kleenf718cd42008-07-29 22:33:52 -07009100static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
9101 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02009102{
9103 return sprintf(page, "%u\n", sched_mc_power_savings);
9104}
Andi Kleenf718cd42008-07-29 22:33:52 -07009105static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
Adrian Bunk6707de002007-08-12 18:08:19 +02009106 const char *buf, size_t count)
9107{
9108 return sched_power_savings_store(buf, count, 0);
9109}
Andi Kleenf718cd42008-07-29 22:33:52 -07009110static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
9111 sched_mc_power_savings_show,
9112 sched_mc_power_savings_store);
Adrian Bunk6707de002007-08-12 18:08:19 +02009113#endif
9114
9115#ifdef CONFIG_SCHED_SMT
Andi Kleenf718cd42008-07-29 22:33:52 -07009116static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
9117 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02009118{
9119 return sprintf(page, "%u\n", sched_smt_power_savings);
9120}
Andi Kleenf718cd42008-07-29 22:33:52 -07009121static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
Adrian Bunk6707de002007-08-12 18:08:19 +02009122 const char *buf, size_t count)
9123{
9124 return sched_power_savings_store(buf, count, 1);
9125}
Andi Kleenf718cd42008-07-29 22:33:52 -07009126static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
9127 sched_smt_power_savings_show,
Adrian Bunk6707de002007-08-12 18:08:19 +02009128 sched_smt_power_savings_store);
9129#endif
9130
Li Zefan39aac642009-01-05 19:18:02 +08009131int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009132{
9133 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07009134
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009135#ifdef CONFIG_SCHED_SMT
9136 if (smt_capable())
9137 err = sysfs_create_file(&cls->kset.kobj,
9138 &attr_sched_smt_power_savings.attr);
9139#endif
9140#ifdef CONFIG_SCHED_MC
9141 if (!err && mc_capable())
9142 err = sysfs_create_file(&cls->kset.kobj,
9143 &attr_sched_mc_power_savings.attr);
9144#endif
9145 return err;
9146}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009147#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07009148
Max Krasnyanskye761b772008-07-15 04:43:49 -07009149#ifndef CONFIG_CPUSETS
Linus Torvalds1da177e2005-04-16 15:20:36 -07009150/*
Max Krasnyanskye761b772008-07-15 04:43:49 -07009151 * Add online and remove offline CPUs from the scheduler domains.
9152 * When cpusets are enabled they take over this function.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009153 */
9154static int update_sched_domains(struct notifier_block *nfb,
9155 unsigned long action, void *hcpu)
9156{
Max Krasnyanskye761b772008-07-15 04:43:49 -07009157 switch (action) {
9158 case CPU_ONLINE:
9159 case CPU_ONLINE_FROZEN:
9160 case CPU_DEAD:
9161 case CPU_DEAD_FROZEN:
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07009162 partition_sched_domains(1, NULL, NULL);
Max Krasnyanskye761b772008-07-15 04:43:49 -07009163 return NOTIFY_OK;
9164
9165 default:
9166 return NOTIFY_DONE;
9167 }
9168}
9169#endif
9170
9171static int update_runtime(struct notifier_block *nfb,
9172 unsigned long action, void *hcpu)
9173{
Peter Zijlstra7def2be2008-06-05 14:49:58 +02009174 int cpu = (int)(long)hcpu;
9175
Linus Torvalds1da177e2005-04-16 15:20:36 -07009176 switch (action) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009177 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07009178 case CPU_DOWN_PREPARE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02009179 disable_runtime(cpu_rq(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009180 return NOTIFY_OK;
9181
Linus Torvalds1da177e2005-04-16 15:20:36 -07009182 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07009183 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07009184 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07009185 case CPU_ONLINE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02009186 enable_runtime(cpu_rq(cpu));
Max Krasnyanskye761b772008-07-15 04:43:49 -07009187 return NOTIFY_OK;
9188
Linus Torvalds1da177e2005-04-16 15:20:36 -07009189 default:
9190 return NOTIFY_DONE;
9191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009192}
Linus Torvalds1da177e2005-04-16 15:20:36 -07009193
9194void __init sched_init_smp(void)
9195{
Rusty Russelldcc30a32008-11-25 02:35:12 +10309196 cpumask_var_t non_isolated_cpus;
9197
9198 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08009199 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07009200
Mike Travis434d53b2008-04-04 18:11:04 -07009201#if defined(CONFIG_NUMA)
9202 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
9203 GFP_KERNEL);
9204 BUG_ON(sched_group_nodes_bycpu == NULL);
9205#endif
Gautham R Shenoy95402b32008-01-25 21:08:02 +01009206 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02009207 mutex_lock(&sched_domains_mutex);
Rusty Russelldcc30a32008-11-25 02:35:12 +10309208 arch_init_sched_domains(cpu_online_mask);
9209 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
9210 if (cpumask_empty(non_isolated_cpus))
9211 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02009212 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01009213 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07009214
9215#ifndef CONFIG_CPUSETS
Linus Torvalds1da177e2005-04-16 15:20:36 -07009216 /* XXX: Theoretical race here - CPU may be hotplugged now */
9217 hotcpu_notifier(update_sched_domains, 0);
Max Krasnyanskye761b772008-07-15 04:43:49 -07009218#endif
9219
9220 /* RT runtime code needs to handle some hotplug events */
9221 hotcpu_notifier(update_runtime, 0);
9222
Peter Zijlstrab328ca12008-04-29 10:02:46 +02009223 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07009224
9225 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10309226 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07009227 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01009228 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10309229 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10309230
Rusty Russell0e3900e2008-11-25 02:35:13 +10309231 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009232}
9233#else
9234void __init sched_init_smp(void)
9235{
Ingo Molnar19978ca2007-11-09 22:39:38 +01009236 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009237}
9238#endif /* CONFIG_SMP */
9239
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05309240const_debug unsigned int sysctl_timer_migration = 1;
9241
Linus Torvalds1da177e2005-04-16 15:20:36 -07009242int in_sched_functions(unsigned long addr)
9243{
Linus Torvalds1da177e2005-04-16 15:20:36 -07009244 return in_lock_functions(addr) ||
9245 (addr >= (unsigned long)__sched_text_start
9246 && addr < (unsigned long)__sched_text_end);
9247}
9248
Alexey Dobriyana9957442007-10-15 17:00:13 +02009249static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02009250{
9251 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02009252 INIT_LIST_HEAD(&cfs_rq->tasks);
Ingo Molnardd41f592007-07-09 18:51:59 +02009253#ifdef CONFIG_FAIR_GROUP_SCHED
9254 cfs_rq->rq = rq;
9255#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02009256 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02009257}
9258
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01009259static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
9260{
9261 struct rt_prio_array *array;
9262 int i;
9263
9264 array = &rt_rq->active;
9265 for (i = 0; i < MAX_RT_PRIO; i++) {
9266 INIT_LIST_HEAD(array->queue + i);
9267 __clear_bit(i, array->bitmap);
9268 }
9269 /* delimiter for bitsearch: */
9270 __set_bit(MAX_RT_PRIO, array->bitmap);
9271
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009272#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -05009273 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins398a1532009-01-14 09:10:04 -05009274#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -05009275 rt_rq->highest_prio.next = MAX_RT_PRIO;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01009276#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01009277#endif
9278#ifdef CONFIG_SMP
9279 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01009280 rt_rq->overloaded = 0;
Fabio Checconic20b08e2009-06-15 20:56:38 +02009281 plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01009282#endif
9283
9284 rt_rq->rt_time = 0;
9285 rt_rq->rt_throttled = 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02009286 rt_rq->rt_runtime = 0;
9287 spin_lock_init(&rt_rq->rt_runtime_lock);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009288
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009289#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01009290 rt_rq->rt_nr_boosted = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009291 rt_rq->rq = rq;
9292#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01009293}
9294
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009295#ifdef CONFIG_FAIR_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009296static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
9297 struct sched_entity *se, int cpu, int add,
9298 struct sched_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009299{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009300 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009301 tg->cfs_rq[cpu] = cfs_rq;
9302 init_cfs_rq(cfs_rq, rq);
9303 cfs_rq->tg = tg;
9304 if (add)
9305 list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
9306
9307 tg->se[cpu] = se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02009308 /* se could be NULL for init_task_group */
9309 if (!se)
9310 return;
9311
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009312 if (!parent)
9313 se->cfs_rq = &rq->cfs;
9314 else
9315 se->cfs_rq = parent->my_q;
9316
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009317 se->my_q = cfs_rq;
9318 se->load.weight = tg->shares;
Peter Zijlstrae05510d2008-05-05 23:56:17 +02009319 se->load.inv_weight = 0;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009320 se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009321}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009322#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009323
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009324#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009325static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
9326 struct sched_rt_entity *rt_se, int cpu, int add,
9327 struct sched_rt_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009328{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009329 struct rq *rq = cpu_rq(cpu);
9330
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009331 tg->rt_rq[cpu] = rt_rq;
9332 init_rt_rq(rt_rq, rq);
9333 rt_rq->tg = tg;
9334 rt_rq->rt_se = rt_se;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02009335 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009336 if (add)
9337 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
9338
9339 tg->rt_se[cpu] = rt_se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02009340 if (!rt_se)
9341 return;
9342
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009343 if (!parent)
9344 rt_se->rt_rq = &rq->rt;
9345 else
9346 rt_se->rt_rq = parent->my_q;
9347
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009348 rt_se->my_q = rt_rq;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009349 rt_se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009350 INIT_LIST_HEAD(&rt_se->run_list);
9351}
9352#endif
9353
Linus Torvalds1da177e2005-04-16 15:20:36 -07009354void __init sched_init(void)
9355{
Ingo Molnardd41f592007-07-09 18:51:59 +02009356 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07009357 unsigned long alloc_size = 0, ptr;
9358
9359#ifdef CONFIG_FAIR_GROUP_SCHED
9360 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
9361#endif
9362#ifdef CONFIG_RT_GROUP_SCHED
9363 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
9364#endif
Peter Zijlstraeff766a2008-04-19 19:45:00 +02009365#ifdef CONFIG_USER_SCHED
9366 alloc_size *= 2;
9367#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10309368#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10309369 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10309370#endif
Mike Travis434d53b2008-04-04 18:11:04 -07009371 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03009372 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07009373
9374#ifdef CONFIG_FAIR_GROUP_SCHED
9375 init_task_group.se = (struct sched_entity **)ptr;
9376 ptr += nr_cpu_ids * sizeof(void **);
9377
9378 init_task_group.cfs_rq = (struct cfs_rq **)ptr;
9379 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02009380
9381#ifdef CONFIG_USER_SCHED
9382 root_task_group.se = (struct sched_entity **)ptr;
9383 ptr += nr_cpu_ids * sizeof(void **);
9384
9385 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
9386 ptr += nr_cpu_ids * sizeof(void **);
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009387#endif /* CONFIG_USER_SCHED */
9388#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07009389#ifdef CONFIG_RT_GROUP_SCHED
9390 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
9391 ptr += nr_cpu_ids * sizeof(void **);
9392
9393 init_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02009394 ptr += nr_cpu_ids * sizeof(void **);
9395
9396#ifdef CONFIG_USER_SCHED
9397 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
9398 ptr += nr_cpu_ids * sizeof(void **);
9399
9400 root_task_group.rt_rq = (struct rt_rq **)ptr;
9401 ptr += nr_cpu_ids * sizeof(void **);
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009402#endif /* CONFIG_USER_SCHED */
9403#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10309404#ifdef CONFIG_CPUMASK_OFFSTACK
9405 for_each_possible_cpu(i) {
9406 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
9407 ptr += cpumask_size();
9408 }
9409#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07009410 }
Ingo Molnardd41f592007-07-09 18:51:59 +02009411
Gregory Haskins57d885f2008-01-25 21:08:18 +01009412#ifdef CONFIG_SMP
9413 init_defrootdomain();
9414#endif
9415
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009416 init_rt_bandwidth(&def_rt_bandwidth,
9417 global_rt_period(), global_rt_runtime());
9418
9419#ifdef CONFIG_RT_GROUP_SCHED
9420 init_rt_bandwidth(&init_task_group.rt_bandwidth,
9421 global_rt_period(), global_rt_runtime());
Peter Zijlstraeff766a2008-04-19 19:45:00 +02009422#ifdef CONFIG_USER_SCHED
9423 init_rt_bandwidth(&root_task_group.rt_bandwidth,
9424 global_rt_period(), RUNTIME_INF);
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009425#endif /* CONFIG_USER_SCHED */
9426#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009427
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009428#ifdef CONFIG_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009429 list_add(&init_task_group.list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02009430 INIT_LIST_HEAD(&init_task_group.children);
9431
9432#ifdef CONFIG_USER_SCHED
9433 INIT_LIST_HEAD(&root_task_group.children);
9434 init_task_group.parent = &root_task_group;
9435 list_add(&init_task_group.siblings, &root_task_group.children);
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009436#endif /* CONFIG_USER_SCHED */
9437#endif /* CONFIG_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009438
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08009439 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07009440 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009441
9442 rq = cpu_rq(i);
9443 spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07009444 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02009445 rq->calc_load_active = 0;
9446 rq->calc_load_update = jiffies + LOAD_FREQ;
Ingo Molnardd41f592007-07-09 18:51:59 +02009447 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01009448 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009449#ifdef CONFIG_FAIR_GROUP_SCHED
9450 init_task_group.shares = init_task_group_load;
9451 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02009452#ifdef CONFIG_CGROUP_SCHED
9453 /*
9454 * How much cpu bandwidth does init_task_group get?
9455 *
9456 * In case of task-groups formed thr' the cgroup filesystem, it
9457 * gets 100% of the cpu resources in the system. This overall
9458 * system cpu resource is divided among the tasks of
9459 * init_task_group and its child task-groups in a fair manner,
9460 * based on each entity's (task or task-group's) weight
9461 * (se->load.weight).
9462 *
9463 * In other words, if init_task_group has 10 tasks of weight
9464 * 1024) and two child groups A0 and A1 (of weight 1024 each),
9465 * then A0's share of the cpu resource is:
9466 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02009467 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02009468 *
9469 * We achieve this by letting init_task_group's tasks sit
9470 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
9471 */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009472 init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02009473#elif defined CONFIG_USER_SCHED
Peter Zijlstraeff766a2008-04-19 19:45:00 +02009474 root_task_group.shares = NICE_0_LOAD;
9475 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02009476 /*
9477 * In case of task-groups formed thr' the user id of tasks,
9478 * init_task_group represents tasks belonging to root user.
9479 * Hence it forms a sibling of all subsequent groups formed.
9480 * In this case, init_task_group gets only a fraction of overall
9481 * system cpu resource, based on the weight assigned to root
9482 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
9483 * by letting tasks of init_task_group sit in a separate cfs_rq
Anirban Sinha84e9dab2009-08-28 22:40:43 -07009484 * (init_tg_cfs_rq) and having one entity represent this group of
Dhaval Giani354d60c2008-04-19 19:44:59 +02009485 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
9486 */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009487 init_tg_cfs_entry(&init_task_group,
Anirban Sinha84e9dab2009-08-28 22:40:43 -07009488 &per_cpu(init_tg_cfs_rq, i),
Peter Zijlstraeff766a2008-04-19 19:45:00 +02009489 &per_cpu(init_sched_entity, i), i, 1,
9490 root_task_group.se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009491
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009492#endif
Dhaval Giani354d60c2008-04-19 19:44:59 +02009493#endif /* CONFIG_FAIR_GROUP_SCHED */
9494
9495 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009496#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009497 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02009498#ifdef CONFIG_CGROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009499 init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02009500#elif defined CONFIG_USER_SCHED
Peter Zijlstraeff766a2008-04-19 19:45:00 +02009501 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009502 init_tg_rt_entry(&init_task_group,
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009503 &per_cpu(init_rt_rq, i),
Peter Zijlstraeff766a2008-04-19 19:45:00 +02009504 &per_cpu(init_sched_rt_entity, i), i, 1,
9505 root_task_group.rt_se[i]);
Dhaval Giani354d60c2008-04-19 19:44:59 +02009506#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009507#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07009508
Ingo Molnardd41f592007-07-09 18:51:59 +02009509 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
9510 rq->cpu_load[j] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009511#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07009512 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01009513 rq->rd = NULL;
Gregory Haskins3f029d32009-07-29 11:08:47 -04009514 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009515 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02009516 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009517 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07009518 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04009519 rq->online = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009520 rq->migration_thread = NULL;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01009521 rq->idle_stamp = 0;
9522 rq->avg_idle = 2*sysctl_sched_migration_cost;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009523 INIT_LIST_HEAD(&rq->migration_queue);
Gregory Haskinsdc938522008-01-25 21:08:26 +01009524 rq_attach_root(rq, &def_root_domain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009525#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01009526 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009527 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009528 }
9529
Peter Williams2dd73a42006-06-27 02:54:34 -07009530 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07009531
Avi Kivitye107be32007-07-26 13:40:43 +02009532#ifdef CONFIG_PREEMPT_NOTIFIERS
9533 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
9534#endif
9535
Christoph Lameterc9819f42006-12-10 02:20:25 -08009536#ifdef CONFIG_SMP
Carlos R. Mafra962cf362008-05-15 11:15:37 -03009537 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
Christoph Lameterc9819f42006-12-10 02:20:25 -08009538#endif
9539
Heiko Carstensb50f60c2006-07-30 03:03:52 -07009540#ifdef CONFIG_RT_MUTEXES
9541 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
9542#endif
9543
Linus Torvalds1da177e2005-04-16 15:20:36 -07009544 /*
9545 * The boot idle thread does lazy MMU switching as well:
9546 */
9547 atomic_inc(&init_mm.mm_count);
9548 enter_lazy_tlb(&init_mm, current);
9549
9550 /*
9551 * Make us the idle thread. Technically, schedule() should not be
9552 * called from this thread, however somewhere below it might be,
9553 * but because we are the idle thread, we just pick up running again
9554 * when this runqueue becomes "idle".
9555 */
9556 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02009557
9558 calc_load_update = jiffies + LOAD_FREQ;
9559
Ingo Molnardd41f592007-07-09 18:51:59 +02009560 /*
9561 * During early bootup we pretend to be a normal task:
9562 */
9563 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01009564
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10309565 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
Pekka Enberg4bdddf82009-06-11 08:35:27 +03009566 alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10309567#ifdef CONFIG_SMP
Rusty Russell7d1e6a92008-11-25 02:35:09 +10309568#ifdef CONFIG_NO_HZ
Pekka Enberg4bdddf82009-06-11 08:35:27 +03009569 alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
9570 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10309571#endif
Pekka Enberg4bdddf82009-06-11 08:35:27 +03009572 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10309573#endif /* SMP */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10309574
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009575 perf_event_init();
Ingo Molnar0d905bc2009-05-04 19:13:30 +02009576
Ingo Molnar6892b752008-02-13 14:02:36 +01009577 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009578}
9579
9580#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02009581static inline int preempt_count_equals(int preempt_offset)
9582{
9583 int nested = preempt_count() & ~PREEMPT_ACTIVE;
9584
9585 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
9586}
9587
9588void __might_sleep(char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009589{
Ingo Molnar48f24c42006-07-03 00:25:40 -07009590#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07009591 static unsigned long prev_jiffy; /* ratelimiting */
9592
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02009593 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
9594 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02009595 return;
9596 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9597 return;
9598 prev_jiffy = jiffies;
9599
9600 printk(KERN_ERR
9601 "BUG: sleeping function called from invalid context at %s:%d\n",
9602 file, line);
9603 printk(KERN_ERR
9604 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
9605 in_atomic(), irqs_disabled(),
9606 current->pid, current->comm);
9607
9608 debug_show_held_locks(current);
9609 if (irqs_disabled())
9610 print_irqtrace_events(current);
9611 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009612#endif
9613}
9614EXPORT_SYMBOL(__might_sleep);
9615#endif
9616
9617#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02009618static void normalize_task(struct rq *rq, struct task_struct *p)
9619{
9620 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02009621
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02009622 update_rq_clock(rq);
9623 on_rq = p->se.on_rq;
9624 if (on_rq)
9625 deactivate_task(rq, p, 0);
9626 __setscheduler(rq, p, SCHED_NORMAL, 0);
9627 if (on_rq) {
9628 activate_task(rq, p, 0);
9629 resched_task(rq->curr);
9630 }
9631}
9632
Linus Torvalds1da177e2005-04-16 15:20:36 -07009633void normalize_rt_tasks(void)
9634{
Ingo Molnara0f98a12007-06-17 18:37:45 +02009635 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009636 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07009637 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009638
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01009639 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02009640 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02009641 /*
9642 * Only normalize user tasks:
9643 */
9644 if (!p->mm)
9645 continue;
9646
Ingo Molnardd41f592007-07-09 18:51:59 +02009647 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02009648#ifdef CONFIG_SCHEDSTATS
9649 p->se.wait_start = 0;
9650 p->se.sleep_start = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02009651 p->se.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02009652#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02009653
9654 if (!rt_task(p)) {
9655 /*
9656 * Renice negative nice level userspace
9657 * tasks back to 0:
9658 */
9659 if (TASK_NICE(p) < 0 && p->mm)
9660 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009661 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02009662 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009663
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01009664 spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07009665 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009666
Ingo Molnar178be792007-10-15 17:00:18 +02009667 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02009668
Ingo Molnarb29739f2006-06-27 02:54:51 -07009669 __task_rq_unlock(rq);
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01009670 spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02009671 } while_each_thread(g, p);
9672
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01009673 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009674}
9675
9676#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07009677
9678#ifdef CONFIG_IA64
9679/*
9680 * These functions are only useful for the IA64 MCA handling.
9681 *
9682 * They can only be called when the whole system has been
9683 * stopped - every CPU needs to be quiescent, and no scheduling
9684 * activity can take place. Using them for anything else would
9685 * be a serious bug, and as a result, they aren't even visible
9686 * under any other configuration.
9687 */
9688
9689/**
9690 * curr_task - return the current task for a given cpu.
9691 * @cpu: the processor in question.
9692 *
9693 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9694 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07009695struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07009696{
9697 return cpu_curr(cpu);
9698}
9699
9700/**
9701 * set_curr_task - set the current task for a given cpu.
9702 * @cpu: the processor in question.
9703 * @p: the task pointer to set.
9704 *
9705 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009706 * are serviced on a separate stack. It allows the architecture to switch the
9707 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07009708 * must be called with all CPU's synchronized, and interrupts disabled, the
9709 * and caller must save the original value of the current task (see
9710 * curr_task() above) and restore that value before reenabling interrupts and
9711 * re-starting the system.
9712 *
9713 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9714 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07009715void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07009716{
9717 cpu_curr(cpu) = p;
9718}
9719
9720#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009721
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009722#ifdef CONFIG_FAIR_GROUP_SCHED
9723static void free_fair_sched_group(struct task_group *tg)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009724{
9725 int i;
9726
9727 for_each_possible_cpu(i) {
9728 if (tg->cfs_rq)
9729 kfree(tg->cfs_rq[i]);
9730 if (tg->se)
9731 kfree(tg->se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009732 }
9733
9734 kfree(tg->cfs_rq);
9735 kfree(tg->se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009736}
9737
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009738static
9739int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009740{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009741 struct cfs_rq *cfs_rq;
Li Zefaneab17222008-10-29 17:03:22 +08009742 struct sched_entity *se;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009743 struct rq *rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009744 int i;
9745
Mike Travis434d53b2008-04-04 18:11:04 -07009746 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009747 if (!tg->cfs_rq)
9748 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07009749 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009750 if (!tg->se)
9751 goto err;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009752
9753 tg->shares = NICE_0_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009754
9755 for_each_possible_cpu(i) {
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009756 rq = cpu_rq(i);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009757
Li Zefaneab17222008-10-29 17:03:22 +08009758 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
9759 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009760 if (!cfs_rq)
9761 goto err;
9762
Li Zefaneab17222008-10-29 17:03:22 +08009763 se = kzalloc_node(sizeof(struct sched_entity),
9764 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009765 if (!se)
9766 goto err;
9767
Li Zefaneab17222008-10-29 17:03:22 +08009768 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009769 }
9770
9771 return 1;
9772
9773 err:
9774 return 0;
9775}
9776
9777static inline void register_fair_sched_group(struct task_group *tg, int cpu)
9778{
9779 list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
9780 &cpu_rq(cpu)->leaf_cfs_rq_list);
9781}
9782
9783static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
9784{
9785 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
9786}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009787#else /* !CONFG_FAIR_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009788static inline void free_fair_sched_group(struct task_group *tg)
9789{
9790}
9791
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009792static inline
9793int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009794{
9795 return 1;
9796}
9797
9798static inline void register_fair_sched_group(struct task_group *tg, int cpu)
9799{
9800}
9801
9802static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
9803{
9804}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009805#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009806
9807#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009808static void free_rt_sched_group(struct task_group *tg)
9809{
9810 int i;
9811
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009812 destroy_rt_bandwidth(&tg->rt_bandwidth);
9813
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009814 for_each_possible_cpu(i) {
9815 if (tg->rt_rq)
9816 kfree(tg->rt_rq[i]);
9817 if (tg->rt_se)
9818 kfree(tg->rt_se[i]);
9819 }
9820
9821 kfree(tg->rt_rq);
9822 kfree(tg->rt_se);
9823}
9824
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009825static
9826int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009827{
9828 struct rt_rq *rt_rq;
Li Zefaneab17222008-10-29 17:03:22 +08009829 struct sched_rt_entity *rt_se;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009830 struct rq *rq;
9831 int i;
9832
Mike Travis434d53b2008-04-04 18:11:04 -07009833 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009834 if (!tg->rt_rq)
9835 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07009836 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009837 if (!tg->rt_se)
9838 goto err;
9839
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009840 init_rt_bandwidth(&tg->rt_bandwidth,
9841 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009842
9843 for_each_possible_cpu(i) {
9844 rq = cpu_rq(i);
9845
Li Zefaneab17222008-10-29 17:03:22 +08009846 rt_rq = kzalloc_node(sizeof(struct rt_rq),
9847 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009848 if (!rt_rq)
9849 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009850
Li Zefaneab17222008-10-29 17:03:22 +08009851 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
9852 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009853 if (!rt_se)
9854 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009855
Li Zefaneab17222008-10-29 17:03:22 +08009856 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009857 }
9858
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009859 return 1;
9860
9861 err:
9862 return 0;
9863}
9864
9865static inline void register_rt_sched_group(struct task_group *tg, int cpu)
9866{
9867 list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
9868 &cpu_rq(cpu)->leaf_rt_rq_list);
9869}
9870
9871static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
9872{
9873 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
9874}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009875#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009876static inline void free_rt_sched_group(struct task_group *tg)
9877{
9878}
9879
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009880static inline
9881int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009882{
9883 return 1;
9884}
9885
9886static inline void register_rt_sched_group(struct task_group *tg, int cpu)
9887{
9888}
9889
9890static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
9891{
9892}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009893#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009894
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009895#ifdef CONFIG_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009896static void free_sched_group(struct task_group *tg)
9897{
9898 free_fair_sched_group(tg);
9899 free_rt_sched_group(tg);
9900 kfree(tg);
9901}
9902
9903/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009904struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009905{
9906 struct task_group *tg;
9907 unsigned long flags;
9908 int i;
9909
9910 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
9911 if (!tg)
9912 return ERR_PTR(-ENOMEM);
9913
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009914 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009915 goto err;
9916
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009917 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009918 goto err;
9919
Peter Zijlstra8ed36992008-02-13 15:45:39 +01009920 spin_lock_irqsave(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009921 for_each_possible_cpu(i) {
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009922 register_fair_sched_group(tg, i);
9923 register_rt_sched_group(tg, i);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009924 }
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009925 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02009926
9927 WARN_ON(!parent); /* root should already exist */
9928
9929 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02009930 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242030-08-14 15:56:40 +08009931 list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01009932 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009933
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009934 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009935
9936err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009937 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009938 return ERR_PTR(-ENOMEM);
9939}
9940
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009941/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009942static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009943{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009944 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009945 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009946}
9947
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009948/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02009949void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009950{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01009951 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009952 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009953
Peter Zijlstra8ed36992008-02-13 15:45:39 +01009954 spin_lock_irqsave(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009955 for_each_possible_cpu(i) {
Peter Zijlstrabccbe082008-02-13 15:45:40 +01009956 unregister_fair_sched_group(tg, i);
9957 unregister_rt_sched_group(tg, i);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009958 }
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009959 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02009960 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01009961 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009962
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009963 /* wait for possible concurrent references to cfs_rqs complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009964 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009965}
9966
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009967/* change task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02009968 * The caller of this function should have put the task in its new group
9969 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
9970 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02009971 */
9972void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009973{
9974 int on_rq, running;
9975 unsigned long flags;
9976 struct rq *rq;
9977
9978 rq = task_rq_lock(tsk, &flags);
9979
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009980 update_rq_clock(rq);
9981
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01009982 running = task_current(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009983 on_rq = tsk->se.on_rq;
9984
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07009985 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009986 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07009987 if (unlikely(running))
9988 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009989
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009990 set_task_rq(tsk, task_cpu(tsk));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02009991
Peter Zijlstra810b3812008-02-29 15:21:01 -05009992#ifdef CONFIG_FAIR_GROUP_SCHED
9993 if (tsk->sched_class->moved_group)
9994 tsk->sched_class->moved_group(tsk);
9995#endif
9996
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07009997 if (unlikely(running))
9998 tsk->sched_class->set_curr_task(rq);
9999 if (on_rq)
Dmitry Adamushko7074bad2007-10-15 17:00:07 +020010000 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010001
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010002 task_rq_unlock(rq, &flags);
10003}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +020010004#endif /* CONFIG_GROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010005
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010006#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrac09595f2008-06-27 13:41:14 +020010007static void __set_se_shares(struct sched_entity *se, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010008{
10009 struct cfs_rq *cfs_rq = se->cfs_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010010 int on_rq;
10011
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010012 on_rq = se->on_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +010010013 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010014 dequeue_entity(cfs_rq, se, 0);
10015
10016 se->load.weight = shares;
Peter Zijlstrae05510d2008-05-05 23:56:17 +020010017 se->load.inv_weight = 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010018
Peter Zijlstra62fb1852008-02-25 17:34:02 +010010019 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010020 enqueue_entity(cfs_rq, se, 0);
Peter Zijlstrac09595f2008-06-27 13:41:14 +020010021}
Peter Zijlstra62fb1852008-02-25 17:34:02 +010010022
Peter Zijlstrac09595f2008-06-27 13:41:14 +020010023static void set_se_shares(struct sched_entity *se, unsigned long shares)
10024{
10025 struct cfs_rq *cfs_rq = se->cfs_rq;
10026 struct rq *rq = cfs_rq->rq;
10027 unsigned long flags;
10028
10029 spin_lock_irqsave(&rq->lock, flags);
10030 __set_se_shares(se, shares);
10031 spin_unlock_irqrestore(&rq->lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010032}
10033
Peter Zijlstra8ed36992008-02-13 15:45:39 +010010034static DEFINE_MUTEX(shares_mutex);
10035
Ingo Molnar4cf86d72007-10-15 17:00:14 +020010036int sched_group_set_shares(struct task_group *tg, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010037{
10038 int i;
Peter Zijlstra8ed36992008-02-13 15:45:39 +010010039 unsigned long flags;
Ingo Molnarc61935f2008-01-22 11:24:58 +010010040
Peter Zijlstra62fb1852008-02-25 17:34:02 +010010041 /*
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +020010042 * We can't change the weight of the root cgroup.
10043 */
10044 if (!tg->se[0])
10045 return -EINVAL;
10046
Peter Zijlstra18d95a22008-04-19 19:45:00 +020010047 if (shares < MIN_SHARES)
10048 shares = MIN_SHARES;
Miao Xiecb4ad1f2008-04-28 12:54:56 +080010049 else if (shares > MAX_SHARES)
10050 shares = MAX_SHARES;
Peter Zijlstra62fb1852008-02-25 17:34:02 +010010051
Peter Zijlstra8ed36992008-02-13 15:45:39 +010010052 mutex_lock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +020010053 if (tg->shares == shares)
Dhaval Giani5cb350b2007-10-15 17:00:14 +020010054 goto done;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010055
Peter Zijlstra8ed36992008-02-13 15:45:39 +010010056 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +010010057 for_each_possible_cpu(i)
10058 unregister_fair_sched_group(tg, i);
Peter Zijlstraf473aa52008-04-19 19:45:00 +020010059 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +010010060 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +010010061
10062 /* wait for any ongoing reference to this group to finish */
10063 synchronize_sched();
10064
10065 /*
10066 * Now we are free to modify the group's share on each cpu
10067 * w/o tripping rebalance_share or load_balance_fair.
10068 */
10069 tg->shares = shares;
Peter Zijlstrac09595f2008-06-27 13:41:14 +020010070 for_each_possible_cpu(i) {
10071 /*
10072 * force a rebalance
10073 */
10074 cfs_rq_set_shares(tg->cfs_rq[i], 0);
Miao Xiecb4ad1f2008-04-28 12:54:56 +080010075 set_se_shares(tg->se[i], shares);
Peter Zijlstrac09595f2008-06-27 13:41:14 +020010076 }
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +010010077
10078 /*
10079 * Enable load balance activity on this group, by inserting it back on
10080 * each cpu's rq->leaf_cfs_rq_list.
10081 */
Peter Zijlstra8ed36992008-02-13 15:45:39 +010010082 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +010010083 for_each_possible_cpu(i)
10084 register_fair_sched_group(tg, i);
Peter Zijlstraf473aa52008-04-19 19:45:00 +020010085 list_add_rcu(&tg->siblings, &tg->parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +010010086 spin_unlock_irqrestore(&task_group_lock, flags);
Dhaval Giani5cb350b2007-10-15 17:00:14 +020010087done:
Peter Zijlstra8ed36992008-02-13 15:45:39 +010010088 mutex_unlock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +020010089 return 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +020010090}
10091
Dhaval Giani5cb350b2007-10-15 17:00:14 +020010092unsigned long sched_group_shares(struct task_group *tg)
10093{
10094 return tg->shares;
10095}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010096#endif
Dhaval Giani5cb350b2007-10-15 17:00:14 +020010097
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010098#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010099/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010100 * Ensure that the real time constraints are schedulable.
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010101 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010102static DEFINE_MUTEX(rt_constraints_mutex);
10103
10104static unsigned long to_ratio(u64 period, u64 runtime)
10105{
10106 if (runtime == RUNTIME_INF)
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010107 return 1ULL << 20;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010108
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010109 return div64_u64(runtime << 20, period);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010110}
10111
Dhaval Giani521f1a242008-02-28 15:21:56 +053010112/* Must be called with tasklist_lock held */
10113static inline int tg_has_rt_tasks(struct task_group *tg)
10114{
10115 struct task_struct *g, *p;
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010116
Dhaval Giani521f1a242008-02-28 15:21:56 +053010117 do_each_thread(g, p) {
10118 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
10119 return 1;
10120 } while_each_thread(g, p);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010121
Dhaval Giani521f1a242008-02-28 15:21:56 +053010122 return 0;
10123}
10124
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010125struct rt_schedulable_data {
10126 struct task_group *tg;
10127 u64 rt_period;
10128 u64 rt_runtime;
10129};
10130
10131static int tg_schedulable(struct task_group *tg, void *data)
10132{
10133 struct rt_schedulable_data *d = data;
10134 struct task_group *child;
10135 unsigned long total, sum = 0;
10136 u64 period, runtime;
10137
10138 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
10139 runtime = tg->rt_bandwidth.rt_runtime;
10140
10141 if (tg == d->tg) {
10142 period = d->rt_period;
10143 runtime = d->rt_runtime;
10144 }
10145
Peter Zijlstra98a48262009-01-14 10:56:32 +010010146#ifdef CONFIG_USER_SCHED
10147 if (tg == &root_task_group) {
10148 period = global_rt_period();
10149 runtime = global_rt_runtime();
10150 }
10151#endif
10152
Peter Zijlstra4653f802008-09-23 15:33:44 +020010153 /*
10154 * Cannot have more runtime than the period.
10155 */
10156 if (runtime > period && runtime != RUNTIME_INF)
10157 return -EINVAL;
10158
10159 /*
10160 * Ensure we don't starve existing RT tasks.
10161 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010162 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
10163 return -EBUSY;
10164
10165 total = to_ratio(period, runtime);
10166
Peter Zijlstra4653f802008-09-23 15:33:44 +020010167 /*
10168 * Nobody can have more than the global setting allows.
10169 */
10170 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
10171 return -EINVAL;
10172
10173 /*
10174 * The sum of our children's runtime should not exceed our own.
10175 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010176 list_for_each_entry_rcu(child, &tg->children, siblings) {
10177 period = ktime_to_ns(child->rt_bandwidth.rt_period);
10178 runtime = child->rt_bandwidth.rt_runtime;
10179
10180 if (child == d->tg) {
10181 period = d->rt_period;
10182 runtime = d->rt_runtime;
10183 }
10184
10185 sum += to_ratio(period, runtime);
10186 }
10187
10188 if (sum > total)
10189 return -EINVAL;
10190
10191 return 0;
10192}
10193
10194static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
10195{
10196 struct rt_schedulable_data data = {
10197 .tg = tg,
10198 .rt_period = period,
10199 .rt_runtime = runtime,
10200 };
10201
10202 return walk_tg_tree(tg_schedulable, tg_nop, &data);
10203}
10204
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010205static int tg_set_bandwidth(struct task_group *tg,
10206 u64 rt_period, u64 rt_runtime)
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010207{
Peter Zijlstraac086bc2008-04-19 19:44:58 +020010208 int i, err = 0;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010209
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010210 mutex_lock(&rt_constraints_mutex);
Dhaval Giani521f1a242008-02-28 15:21:56 +053010211 read_lock(&tasklist_lock);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010212 err = __rt_schedulable(tg, rt_period, rt_runtime);
10213 if (err)
Dhaval Giani521f1a242008-02-28 15:21:56 +053010214 goto unlock;
Peter Zijlstraac086bc2008-04-19 19:44:58 +020010215
10216 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010217 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
10218 tg->rt_bandwidth.rt_runtime = rt_runtime;
Peter Zijlstraac086bc2008-04-19 19:44:58 +020010219
10220 for_each_possible_cpu(i) {
10221 struct rt_rq *rt_rq = tg->rt_rq[i];
10222
10223 spin_lock(&rt_rq->rt_runtime_lock);
10224 rt_rq->rt_runtime = rt_runtime;
10225 spin_unlock(&rt_rq->rt_runtime_lock);
10226 }
10227 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010228 unlock:
Dhaval Giani521f1a242008-02-28 15:21:56 +053010229 read_unlock(&tasklist_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010230 mutex_unlock(&rt_constraints_mutex);
10231
10232 return err;
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010233}
10234
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010235int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
10236{
10237 u64 rt_runtime, rt_period;
10238
10239 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
10240 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
10241 if (rt_runtime_us < 0)
10242 rt_runtime = RUNTIME_INF;
10243
10244 return tg_set_bandwidth(tg, rt_period, rt_runtime);
10245}
10246
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010247long sched_group_rt_runtime(struct task_group *tg)
10248{
10249 u64 rt_runtime_us;
10250
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010251 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010252 return -1;
10253
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010254 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010255 do_div(rt_runtime_us, NSEC_PER_USEC);
10256 return rt_runtime_us;
10257}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010258
10259int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
10260{
10261 u64 rt_runtime, rt_period;
10262
10263 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
10264 rt_runtime = tg->rt_bandwidth.rt_runtime;
10265
Raistlin619b0482008-06-26 18:54:09 +020010266 if (rt_period == 0)
10267 return -EINVAL;
10268
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010269 return tg_set_bandwidth(tg, rt_period, rt_runtime);
10270}
10271
10272long sched_group_rt_period(struct task_group *tg)
10273{
10274 u64 rt_period_us;
10275
10276 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
10277 do_div(rt_period_us, NSEC_PER_USEC);
10278 return rt_period_us;
10279}
10280
10281static int sched_rt_global_constraints(void)
10282{
Peter Zijlstra4653f802008-09-23 15:33:44 +020010283 u64 runtime, period;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010284 int ret = 0;
10285
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -070010286 if (sysctl_sched_rt_period <= 0)
10287 return -EINVAL;
10288
Peter Zijlstra4653f802008-09-23 15:33:44 +020010289 runtime = global_rt_runtime();
10290 period = global_rt_period();
10291
10292 /*
10293 * Sanity check on the sysctl variables.
10294 */
10295 if (runtime > period && runtime != RUNTIME_INF)
10296 return -EINVAL;
Peter Zijlstra10b612f2008-06-19 14:22:27 +020010297
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010298 mutex_lock(&rt_constraints_mutex);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010299 read_lock(&tasklist_lock);
Peter Zijlstra4653f802008-09-23 15:33:44 +020010300 ret = __rt_schedulable(NULL, 0, 0);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +020010301 read_unlock(&tasklist_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010302 mutex_unlock(&rt_constraints_mutex);
10303
10304 return ret;
10305}
Dhaval Giani54e99122009-02-27 15:13:54 +053010306
10307int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
10308{
10309 /* Don't accept realtime tasks when there is no way for them to run */
10310 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
10311 return 0;
10312
10313 return 1;
10314}
10315
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +020010316#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010317static int sched_rt_global_constraints(void)
10318{
Peter Zijlstraac086bc2008-04-19 19:44:58 +020010319 unsigned long flags;
10320 int i;
10321
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -070010322 if (sysctl_sched_rt_period <= 0)
10323 return -EINVAL;
10324
Peter Zijlstra60aa6052009-05-05 17:50:21 +020010325 /*
10326 * There's always some RT tasks in the root group
10327 * -- migration, kstopmachine etc..
10328 */
10329 if (sysctl_sched_rt_runtime == 0)
10330 return -EBUSY;
10331
Peter Zijlstraac086bc2008-04-19 19:44:58 +020010332 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
10333 for_each_possible_cpu(i) {
10334 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
10335
10336 spin_lock(&rt_rq->rt_runtime_lock);
10337 rt_rq->rt_runtime = global_rt_runtime();
10338 spin_unlock(&rt_rq->rt_runtime_lock);
10339 }
10340 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
10341
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010342 return 0;
10343}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +020010344#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010345
10346int sched_rt_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -070010347 void __user *buffer, size_t *lenp,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010348 loff_t *ppos)
10349{
10350 int ret;
10351 int old_period, old_runtime;
10352 static DEFINE_MUTEX(mutex);
10353
10354 mutex_lock(&mutex);
10355 old_period = sysctl_sched_rt_period;
10356 old_runtime = sysctl_sched_rt_runtime;
10357
Alexey Dobriyan8d65af72009-09-23 15:57:19 -070010358 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010359
10360 if (!ret && write) {
10361 ret = sched_rt_global_constraints();
10362 if (ret) {
10363 sysctl_sched_rt_period = old_period;
10364 sysctl_sched_rt_runtime = old_runtime;
10365 } else {
10366 def_rt_bandwidth.rt_runtime = global_rt_runtime();
10367 def_rt_bandwidth.rt_period =
10368 ns_to_ktime(global_rt_period());
10369 }
10370 }
10371 mutex_unlock(&mutex);
10372
10373 return ret;
10374}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010375
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010376#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010377
10378/* return corresponding task_group object of a cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +020010379static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010380{
Paul Menage2b01dfe2007-10-24 18:23:50 +020010381 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
10382 struct task_group, css);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010383}
10384
10385static struct cgroup_subsys_state *
Paul Menage2b01dfe2007-10-24 18:23:50 +020010386cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010387{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +020010388 struct task_group *tg, *parent;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010389
Paul Menage2b01dfe2007-10-24 18:23:50 +020010390 if (!cgrp->parent) {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010391 /* This is early initialization for the top cgroup */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010392 return &init_task_group.css;
10393 }
10394
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +020010395 parent = cgroup_tg(cgrp->parent);
10396 tg = sched_create_group(parent);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010397 if (IS_ERR(tg))
10398 return ERR_PTR(-ENOMEM);
10399
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010400 return &tg->css;
10401}
10402
Ingo Molnar41a2d6c2007-12-05 15:46:09 +010010403static void
10404cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010405{
Paul Menage2b01dfe2007-10-24 18:23:50 +020010406 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010407
10408 sched_destroy_group(tg);
10409}
10410
Ingo Molnar41a2d6c2007-12-05 15:46:09 +010010411static int
Ben Blumbe367d02009-09-23 15:56:31 -070010412cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010413{
Peter Zijlstrab68aa232008-02-13 15:45:40 +010010414#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Giani54e99122009-02-27 15:13:54 +053010415 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
Peter Zijlstrab68aa232008-02-13 15:45:40 +010010416 return -EINVAL;
10417#else
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010418 /* We don't support RT-tasks being in separate groups */
10419 if (tsk->sched_class != &fair_sched_class)
10420 return -EINVAL;
Peter Zijlstrab68aa232008-02-13 15:45:40 +010010421#endif
Ben Blumbe367d02009-09-23 15:56:31 -070010422 return 0;
10423}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010424
Ben Blumbe367d02009-09-23 15:56:31 -070010425static int
10426cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
10427 struct task_struct *tsk, bool threadgroup)
10428{
10429 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
10430 if (retval)
10431 return retval;
10432 if (threadgroup) {
10433 struct task_struct *c;
10434 rcu_read_lock();
10435 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
10436 retval = cpu_cgroup_can_attach_task(cgrp, c);
10437 if (retval) {
10438 rcu_read_unlock();
10439 return retval;
10440 }
10441 }
10442 rcu_read_unlock();
10443 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010444 return 0;
10445}
10446
10447static void
Paul Menage2b01dfe2007-10-24 18:23:50 +020010448cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Ben Blumbe367d02009-09-23 15:56:31 -070010449 struct cgroup *old_cont, struct task_struct *tsk,
10450 bool threadgroup)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010451{
10452 sched_move_task(tsk);
Ben Blumbe367d02009-09-23 15:56:31 -070010453 if (threadgroup) {
10454 struct task_struct *c;
10455 rcu_read_lock();
10456 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
10457 sched_move_task(c);
10458 }
10459 rcu_read_unlock();
10460 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010461}
10462
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010463#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagef4c753b2008-04-29 00:59:56 -070010464static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
Paul Menage2b01dfe2007-10-24 18:23:50 +020010465 u64 shareval)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010466{
Paul Menage2b01dfe2007-10-24 18:23:50 +020010467 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010468}
10469
Paul Menagef4c753b2008-04-29 00:59:56 -070010470static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010471{
Paul Menage2b01dfe2007-10-24 18:23:50 +020010472 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010473
10474 return (u64) tg->shares;
10475}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +020010476#endif /* CONFIG_FAIR_GROUP_SCHED */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010477
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010478#ifdef CONFIG_RT_GROUP_SCHED
Mirco Tischler0c708142008-05-14 16:05:46 -070010479static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
Paul Menage06ecb272008-04-29 01:00:06 -070010480 s64 val)
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010481{
Paul Menage06ecb272008-04-29 01:00:06 -070010482 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010483}
10484
Paul Menage06ecb272008-04-29 01:00:06 -070010485static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010486{
Paul Menage06ecb272008-04-29 01:00:06 -070010487 return sched_group_rt_runtime(cgroup_tg(cgrp));
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010488}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010489
10490static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
10491 u64 rt_period_us)
10492{
10493 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
10494}
10495
10496static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
10497{
10498 return sched_group_rt_period(cgroup_tg(cgrp));
10499}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +020010500#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010501
Paul Menagefe5c7cc2007-10-29 21:18:11 +010010502static struct cftype cpu_files[] = {
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010503#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagefe5c7cc2007-10-29 21:18:11 +010010504 {
10505 .name = "shares",
Paul Menagef4c753b2008-04-29 00:59:56 -070010506 .read_u64 = cpu_shares_read_u64,
10507 .write_u64 = cpu_shares_write_u64,
Paul Menagefe5c7cc2007-10-29 21:18:11 +010010508 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010509#endif
10510#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010511 {
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +010010512 .name = "rt_runtime_us",
Paul Menage06ecb272008-04-29 01:00:06 -070010513 .read_s64 = cpu_rt_runtime_read,
10514 .write_s64 = cpu_rt_runtime_write,
Peter Zijlstra6f505b12008-01-25 21:08:30 +010010515 },
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010516 {
10517 .name = "rt_period_us",
Paul Menagef4c753b2008-04-29 00:59:56 -070010518 .read_u64 = cpu_rt_period_read_uint,
10519 .write_u64 = cpu_rt_period_write_uint,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +020010520 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010521#endif
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010522};
10523
10524static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
10525{
Paul Menagefe5c7cc2007-10-29 21:18:11 +010010526 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010527}
10528
10529struct cgroup_subsys cpu_cgroup_subsys = {
Ingo Molnar38605ca2007-10-29 21:18:11 +010010530 .name = "cpu",
10531 .create = cpu_cgroup_create,
10532 .destroy = cpu_cgroup_destroy,
10533 .can_attach = cpu_cgroup_can_attach,
10534 .attach = cpu_cgroup_attach,
10535 .populate = cpu_cgroup_populate,
10536 .subsys_id = cpu_cgroup_subsys_id,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -070010537 .early_init = 1,
10538};
10539
Peter Zijlstra052f1dc2008-02-13 15:45:40 +010010540#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010541
10542#ifdef CONFIG_CGROUP_CPUACCT
10543
10544/*
10545 * CPU accounting code for task groups.
10546 *
10547 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
10548 * (balbir@in.ibm.com).
10549 */
10550
Bharata B Rao934352f2008-11-10 20:41:13 +053010551/* track cpu usage of a group of tasks and its child groups */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010552struct cpuacct {
10553 struct cgroup_subsys_state css;
10554 /* cpuusage holds pointer to a u64-type object on every cpu */
10555 u64 *cpuusage;
Bharata B Raoef12fef2009-03-31 10:02:22 +053010556 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
Bharata B Rao934352f2008-11-10 20:41:13 +053010557 struct cpuacct *parent;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010558};
10559
10560struct cgroup_subsys cpuacct_subsys;
10561
10562/* return cpu accounting group corresponding to this container */
Dhaval Giani32cd7562008-02-29 10:02:43 +053010563static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010564{
Dhaval Giani32cd7562008-02-29 10:02:43 +053010565 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010566 struct cpuacct, css);
10567}
10568
10569/* return cpu accounting group to which this task belongs */
10570static inline struct cpuacct *task_ca(struct task_struct *tsk)
10571{
10572 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
10573 struct cpuacct, css);
10574}
10575
10576/* create a new cpu accounting group */
10577static struct cgroup_subsys_state *cpuacct_create(
Dhaval Giani32cd7562008-02-29 10:02:43 +053010578 struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010579{
10580 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
Bharata B Raoef12fef2009-03-31 10:02:22 +053010581 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010582
10583 if (!ca)
Bharata B Raoef12fef2009-03-31 10:02:22 +053010584 goto out;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010585
10586 ca->cpuusage = alloc_percpu(u64);
Bharata B Raoef12fef2009-03-31 10:02:22 +053010587 if (!ca->cpuusage)
10588 goto out_free_ca;
10589
10590 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
10591 if (percpu_counter_init(&ca->cpustat[i], 0))
10592 goto out_free_counters;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010593
Bharata B Rao934352f2008-11-10 20:41:13 +053010594 if (cgrp->parent)
10595 ca->parent = cgroup_ca(cgrp->parent);
10596
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010597 return &ca->css;
Bharata B Raoef12fef2009-03-31 10:02:22 +053010598
10599out_free_counters:
10600 while (--i >= 0)
10601 percpu_counter_destroy(&ca->cpustat[i]);
10602 free_percpu(ca->cpuusage);
10603out_free_ca:
10604 kfree(ca);
10605out:
10606 return ERR_PTR(-ENOMEM);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010607}
10608
10609/* destroy an existing cpu accounting group */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +010010610static void
Dhaval Giani32cd7562008-02-29 10:02:43 +053010611cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010612{
Dhaval Giani32cd7562008-02-29 10:02:43 +053010613 struct cpuacct *ca = cgroup_ca(cgrp);
Bharata B Raoef12fef2009-03-31 10:02:22 +053010614 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010615
Bharata B Raoef12fef2009-03-31 10:02:22 +053010616 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
10617 percpu_counter_destroy(&ca->cpustat[i]);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010618 free_percpu(ca->cpuusage);
10619 kfree(ca);
10620}
10621
Ken Chen720f5492008-12-15 22:02:01 -080010622static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
10623{
Rusty Russellb36128c2009-02-20 16:29:08 +090010624 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -080010625 u64 data;
10626
10627#ifndef CONFIG_64BIT
10628 /*
10629 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
10630 */
10631 spin_lock_irq(&cpu_rq(cpu)->lock);
10632 data = *cpuusage;
10633 spin_unlock_irq(&cpu_rq(cpu)->lock);
10634#else
10635 data = *cpuusage;
10636#endif
10637
10638 return data;
10639}
10640
10641static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
10642{
Rusty Russellb36128c2009-02-20 16:29:08 +090010643 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -080010644
10645#ifndef CONFIG_64BIT
10646 /*
10647 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
10648 */
10649 spin_lock_irq(&cpu_rq(cpu)->lock);
10650 *cpuusage = val;
10651 spin_unlock_irq(&cpu_rq(cpu)->lock);
10652#else
10653 *cpuusage = val;
10654#endif
10655}
10656
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010657/* return total cpu usage (in nanoseconds) of a group */
Dhaval Giani32cd7562008-02-29 10:02:43 +053010658static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010659{
Dhaval Giani32cd7562008-02-29 10:02:43 +053010660 struct cpuacct *ca = cgroup_ca(cgrp);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010661 u64 totalcpuusage = 0;
10662 int i;
10663
Ken Chen720f5492008-12-15 22:02:01 -080010664 for_each_present_cpu(i)
10665 totalcpuusage += cpuacct_cpuusage_read(ca, i);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010666
10667 return totalcpuusage;
10668}
10669
Dhaval Giani0297b802008-02-29 10:02:44 +053010670static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
10671 u64 reset)
10672{
10673 struct cpuacct *ca = cgroup_ca(cgrp);
10674 int err = 0;
10675 int i;
10676
10677 if (reset) {
10678 err = -EINVAL;
10679 goto out;
10680 }
10681
Ken Chen720f5492008-12-15 22:02:01 -080010682 for_each_present_cpu(i)
10683 cpuacct_cpuusage_write(ca, i, 0);
Dhaval Giani0297b802008-02-29 10:02:44 +053010684
Dhaval Giani0297b802008-02-29 10:02:44 +053010685out:
10686 return err;
10687}
10688
Ken Chene9515c32008-12-15 22:04:15 -080010689static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
10690 struct seq_file *m)
10691{
10692 struct cpuacct *ca = cgroup_ca(cgroup);
10693 u64 percpu;
10694 int i;
10695
10696 for_each_present_cpu(i) {
10697 percpu = cpuacct_cpuusage_read(ca, i);
10698 seq_printf(m, "%llu ", (unsigned long long) percpu);
10699 }
10700 seq_printf(m, "\n");
10701 return 0;
10702}
10703
Bharata B Raoef12fef2009-03-31 10:02:22 +053010704static const char *cpuacct_stat_desc[] = {
10705 [CPUACCT_STAT_USER] = "user",
10706 [CPUACCT_STAT_SYSTEM] = "system",
10707};
10708
10709static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
10710 struct cgroup_map_cb *cb)
10711{
10712 struct cpuacct *ca = cgroup_ca(cgrp);
10713 int i;
10714
10715 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
10716 s64 val = percpu_counter_read(&ca->cpustat[i]);
10717 val = cputime64_to_clock_t(val);
10718 cb->fill(cb, cpuacct_stat_desc[i], val);
10719 }
10720 return 0;
10721}
10722
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010723static struct cftype files[] = {
10724 {
10725 .name = "usage",
Paul Menagef4c753b2008-04-29 00:59:56 -070010726 .read_u64 = cpuusage_read,
10727 .write_u64 = cpuusage_write,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010728 },
Ken Chene9515c32008-12-15 22:04:15 -080010729 {
10730 .name = "usage_percpu",
10731 .read_seq_string = cpuacct_percpu_seq_read,
10732 },
Bharata B Raoef12fef2009-03-31 10:02:22 +053010733 {
10734 .name = "stat",
10735 .read_map = cpuacct_stats_show,
10736 },
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010737};
10738
Dhaval Giani32cd7562008-02-29 10:02:43 +053010739static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010740{
Dhaval Giani32cd7562008-02-29 10:02:43 +053010741 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010742}
10743
10744/*
10745 * charge this task's execution time to its accounting group.
10746 *
10747 * called with rq->lock held.
10748 */
10749static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
10750{
10751 struct cpuacct *ca;
Bharata B Rao934352f2008-11-10 20:41:13 +053010752 int cpu;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010753
Li Zefanc40c6f82009-02-26 15:40:15 +080010754 if (unlikely(!cpuacct_subsys.active))
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010755 return;
10756
Bharata B Rao934352f2008-11-10 20:41:13 +053010757 cpu = task_cpu(tsk);
Bharata B Raoa18b83b2009-03-23 10:02:53 +053010758
10759 rcu_read_lock();
10760
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010761 ca = task_ca(tsk);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010762
Bharata B Rao934352f2008-11-10 20:41:13 +053010763 for (; ca; ca = ca->parent) {
Rusty Russellb36128c2009-02-20 16:29:08 +090010764 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010765 *cpuusage += cputime;
10766 }
Bharata B Raoa18b83b2009-03-23 10:02:53 +053010767
10768 rcu_read_unlock();
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010769}
10770
Bharata B Raoef12fef2009-03-31 10:02:22 +053010771/*
10772 * Charge the system/user time to the task's accounting group.
10773 */
10774static void cpuacct_update_stats(struct task_struct *tsk,
10775 enum cpuacct_stat_index idx, cputime_t val)
10776{
10777 struct cpuacct *ca;
10778
10779 if (unlikely(!cpuacct_subsys.active))
10780 return;
10781
10782 rcu_read_lock();
10783 ca = task_ca(tsk);
10784
10785 do {
10786 percpu_counter_add(&ca->cpustat[idx], val);
10787 ca = ca->parent;
10788 } while (ca);
10789 rcu_read_unlock();
10790}
10791
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +010010792struct cgroup_subsys cpuacct_subsys = {
10793 .name = "cpuacct",
10794 .create = cpuacct_create,
10795 .destroy = cpuacct_destroy,
10796 .populate = cpuacct_populate,
10797 .subsys_id = cpuacct_subsys_id,
10798};
10799#endif /* CONFIG_CGROUP_CPUACCT */
Paul E. McKenney03b042b2009-06-25 09:08:16 -070010800
10801#ifndef CONFIG_SMP
10802
10803int rcu_expedited_torture_stats(char *page)
10804{
10805 return 0;
10806}
10807EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
10808
10809void synchronize_sched_expedited(void)
10810{
10811}
10812EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
10813
10814#else /* #ifndef CONFIG_SMP */
10815
10816static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
10817static DEFINE_MUTEX(rcu_sched_expedited_mutex);
10818
10819#define RCU_EXPEDITED_STATE_POST -2
10820#define RCU_EXPEDITED_STATE_IDLE -1
10821
10822static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10823
10824int rcu_expedited_torture_stats(char *page)
10825{
10826 int cnt = 0;
10827 int cpu;
10828
10829 cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
10830 for_each_online_cpu(cpu) {
10831 cnt += sprintf(&page[cnt], " %d:%d",
10832 cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
10833 }
10834 cnt += sprintf(&page[cnt], "\n");
10835 return cnt;
10836}
10837EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
10838
10839static long synchronize_sched_expedited_count;
10840
10841/*
10842 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
10843 * approach to force grace period to end quickly. This consumes
10844 * significant time on all CPUs, and is thus not recommended for
10845 * any sort of common-case code.
10846 *
10847 * Note that it is illegal to call this function while holding any
10848 * lock that is acquired by a CPU-hotplug notifier. Failing to
10849 * observe this restriction will result in deadlock.
10850 */
10851void synchronize_sched_expedited(void)
10852{
10853 int cpu;
10854 unsigned long flags;
10855 bool need_full_sync = 0;
10856 struct rq *rq;
10857 struct migration_req *req;
10858 long snap;
10859 int trycount = 0;
10860
10861 smp_mb(); /* ensure prior mod happens before capturing snap. */
10862 snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
10863 get_online_cpus();
10864 while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
10865 put_online_cpus();
10866 if (trycount++ < 10)
10867 udelay(trycount * num_online_cpus());
10868 else {
10869 synchronize_sched();
10870 return;
10871 }
10872 if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
10873 smp_mb(); /* ensure test happens before caller kfree */
10874 return;
10875 }
10876 get_online_cpus();
10877 }
10878 rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
10879 for_each_online_cpu(cpu) {
10880 rq = cpu_rq(cpu);
10881 req = &per_cpu(rcu_migration_req, cpu);
10882 init_completion(&req->done);
10883 req->task = NULL;
10884 req->dest_cpu = RCU_MIGRATION_NEED_QS;
10885 spin_lock_irqsave(&rq->lock, flags);
10886 list_add(&req->list, &rq->migration_queue);
10887 spin_unlock_irqrestore(&rq->lock, flags);
10888 wake_up_process(rq->migration_thread);
10889 }
10890 for_each_online_cpu(cpu) {
10891 rcu_expedited_state = cpu;
10892 req = &per_cpu(rcu_migration_req, cpu);
10893 rq = cpu_rq(cpu);
10894 wait_for_completion(&req->done);
10895 spin_lock_irqsave(&rq->lock, flags);
10896 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
10897 need_full_sync = 1;
10898 req->dest_cpu = RCU_MIGRATION_IDLE;
10899 spin_unlock_irqrestore(&rq->lock, flags);
10900 }
10901 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10902 mutex_unlock(&rcu_sched_expedited_mutex);
10903 put_online_cpus();
10904 if (need_full_sync)
10905 synchronize_sched();
10906}
10907EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
10908
10909#endif /* #else #ifndef CONFIG_SMP */